ngram
listlengths
0
67.8k
[ "Eye-space Z position of the far clipping planes. depth_type: A string representing the", "# pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face", "Unless required by applicable law or agreed to in writing, software # distributed", "= [] for view_group_index, absolute_position in enumerate(camera_positions): views = [] for face in", "-1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name", "A list of 3D points (each a list of 3 floats), representing the", "far) / (near - far) f = (2.0 * near * far) /", "# pylint: disable=undefined-variable if face_name is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name", "of three floats representing the upper bounds of the headbox in world-space. num_view_groups:", "number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path", "return [a, 0.0, c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0, e, f,", "position in the box. Returns: A list of floats, representing the absolute position", "a headbox. Camera posittions are computed as a 3D Hammersley point set. The", "position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point: Distance(point,", "exactly equal to the headbox. The points are then sorted according to distance", "0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'right': return", "1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name", "far_clip: Eye-space Z position of the far clipping planes. depth_type: A string representing", "cube face, are generated. Each camera is configured with a square viewport and", "cameras. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z", "representing the first point. 
point_b: A list of numbers representing the second point.", "/= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda", "(face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view = { 'projective_camera': camera,", "0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for face", "the face. Must be one of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns:", "face is not a valid cube map face name. \"\"\" # Disable the", "elif face_name is 'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0,", "'back': return [-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0,", "bottom = -near top = near a = (2.0 * near) / (right", "distance_sqr = 0.0 for element in delta_sqr: distance_sqr += element return math.sqrt(distance_sqr) def", "\"\"\" reversed_digits = 0 base_n = 1 # Compute the reversed digits, base", "aribtrary number of dimensions. Args: point_a: A list of numbers representing the first", "} } } views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return the view_groups", "face_name is not the name of a cube map face. \"\"\" # pylint:", "to eye). depth_channel_name: Name of the depth channel in the output file. Commonly", "'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output JSON manifest file.", "headbox_max, num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a", "# Return the view_groups as a Python list. return view_groups def CreateRig(headbox_min, headbox_max,", "inverse of |a| in base |base|. 
Args: a: The integer number for which", "def PointInBox(box_min, box_max, sample): \"\"\"Computes a sample point inside a box with arbitrary", "d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable", "position of the far clipping plane. Returns: The clip-from-eye matrix as a list", "# point.w = 1.0 implicitly result_hom[row] += matrix[4 * row + 3] w", "json import math import operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using", "0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "point_a, point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr = 0.0 for element in", "(camera positions) to generate. Must be a power of two. image_size: Resolution of", "0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable", "for the samples. sample = [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3)", "(top - bottom) e = (near + far) / (near - far) f", "an OpenGL-style projection matrix. Args: near: Eye-space Z position of the near clipping", "adjusts the Maya timeline to exactly contain the frames for the rig animation.", "'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z", "# Generate the six Maya cameras and keyframe their positions. if not json_only:", "2), RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample)", "0.0, b, d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0] #", "images in pixels. near_clip: Eye-space Z position of the near clipping planes. 
far_clip:", "image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view groups", "sorted_positions = sorted( camera_positions, key=lambda point: Distance(point, headbox_center)) # Replace the point closest", "Distance(point, headbox_center)) # Replace the point closest to the headbox center by the", "+ '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name", "camera rig and JSON manifest for Seurat. Args: headbox_min: List of three floats", "0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions =", "= 1.0 implicitly result_hom[row] += matrix[4 * row + 3] w = result_hom[3]", "is requested. return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])] samples = [] max_sample =", "Commonly used values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern", "0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "that is closest to the headbox center is replaced by the headbox center", "points. The points can have an aribtrary number of dimensions. Args: point_a: A", "camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name,", "depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view = { 'projective_camera': camera, 'depth_image_file': {", "for the rig animation. Each of the six cameras will get one keyframe", "'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create", "the linter runs. 
# # pylint: disable=undefined-variable start_time = 0 end_time = len(camera_positions)", "file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary representing the view", "projection matrix. Args: near: Eye-space Z position of the near clipping plane. far:", "[0.0, 0.0, 0.0, 0.0] for row in xrange(4): for col in xrange(3): result_hom[row]", "depth_type: A string representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space Z", "xrange(num_cameras): # Use a 3D Hammersley point set for the samples. sample =", "A list of numbers representing the first point. point_b: A list of numbers", "maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif", "a / base digit = a - next_a * base reversed_digits = reversed_digits", "disable elif face_name is 'back': return [-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,", "0.0, 0.0] for i in xrange(num_cameras): # Use a 3D Hammersley point set", "file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output JSON", "'depth': { 'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group = {'views':", "depth channel in the output file. Commonly used values are 'R' (VRay) and", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "map(operator.mul, delta, delta) distance_sqr = 0.0 for element in delta_sqr: distance_sqr += element", "to the headbox center by the headbox center # itself. sorted_positions[0] = PointInBox(headbox_min,", "depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math import operator def ProjectPoint(matrix,", "headbox center by the headbox center # itself. 
sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5,", "0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates", "of 3D points (each a list of 3 floats), representing the positions of", "t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def", "manifest and write it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip,", "implicitly result_hom[row] += matrix[4 * row + 3] w = result_hom[3] return map(operator.div,", "clip planes. near <= 0.0 or far <= near. \"\"\" if near <=", "the view_groups as a Python list. return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size,", "Python list. return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name,", "= map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample) position = map(operator.add, box_min,", "top = near a = (2.0 * near) / (right - left) b", "floats representing the upper bounds of the box. sample: A list of floats", "encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]),", "a given cube map face. Args: camera_name: Name of the Maya camera's transform", "manifest file. json_only: A boolean value. If true, the Maya camera generation step", "while a > 0: next_a = a / base digit = a -", "raise ValueError('near must be positive.') if far <= near: raise ValueError('far must be", "are computed as a 3D Hammersley point set. The points are transformed such", "the headbox center by the headbox center # itself. 
sorted_positions[0] = PointInBox(headbox_min, headbox_max,", "view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path,", "col in xrange(3): result_hom[row] += matrix[4 * row + col] * point[col] #", "two points. The points can have an aribtrary number of dimensions. Args: point_a:", "each cube face, are generated. Each camera is configured with a square viewport", "headbox_max, [0.5, 0.5, 0.5]) # Generate the JSON manifest and write it to", "'top']: # Create a cube face camera and rotate it. camera_name = maya.cmds.camera(", "cameras. Raises: ValueError: num_cameras is not positive. \"\"\" if num_cameras <= 0: raise", "horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes for all positions", "in pixels. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space", "elif face_name is 'bottom': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,", "camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view", "depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view groups for the JSON output.", "and rotate it. camera_name = maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip,", "16 floats. point: A 3D point represented as a list of 3 floats.", "list of 3D points (each a list of 3 floats) representing the positions", "A 4x4 matrix represented as a list of 16 floats. 
point: A 3D", "disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front': return [ 1.0, 0.0, 0.0,", "List of three floats representing the upper bounds of the headbox in world-space.", "planes. far_clip: Eye-space Z position of the far clipping planes. \"\"\" # Disable", "Use the headbox center if a single camera position is requested. return [PointInBox(headbox_min,", "positions) to generate. Must be a power of two. image_size: Resolution of the", "pyformat: disable elif face_name is 'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0,", "groups (camera positions) to generate. Must be a power of two. image_size: Resolution", "rotate it. camera_name = maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0]", "xrange(3): world_from_eye_matrix[4 * i + 3] = position[i] # Create camera object camera", "= (2.0 * near * far) / (near - far) # pylint: disable=bad-whitespace", "(2.0 * near) / (right - left) b = (2.0 * near) /", "pattern for color images. Must contain a placeholder for a string (face name)", "the far clipping planes. depth_type: A string representing the depth encoding. Valid values", "row in xrange(4): for col in xrange(3): result_hom[row] += matrix[4 * row +", "file. json_only: A boolean value. If true, the Maya camera generation step is", "of the far clipping planes. \"\"\" # Disable the undefined-variable lint error, because", "headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates", "A boolean value. If true, the Maya camera generation step is bypassed. \"\"\"", "map face. Raises: ValueError: face is not a valid cube map face name.", "be a power of two. Returns: A list of 3D points (each a", "reversed_digits * base + digit base_n *= base a = next_a # Only", "\"\"\"Creates and returns the view groups for the JSON output. 
Args: headbox_center: Center", "not use this file except in compliance with the License. # You may", "far): \"\"\"Creates a cube-face 90 degree FOV projection matrix. The created matrix is", "0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'back': return [-1.0,", "list of floats representing the upper bounds of the box. sample: A list", "is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX',", "0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable def", "in enumerate(camera_positions): views = [] for face in ['front', 'back', 'left', 'right', 'bottom',", "cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six Maya cameras and", "animation. Each of the six cameras will get one keyframe per camera position.", "represented as a list of 3 floats. \"\"\" result_hom = [0.0, 0.0, 0.0,", "The projected point, represented as a list of 3 floats. \"\"\" result_hom =", "2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License,", "# Disable the undefined-variable lint error, because the Maya package is not #", "\"\"\"Projects a 3D point using a 4x4 matrix. Args: matrix: A 4x4 matrix", "-90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid face_name')", "the box. Returns: A list of floats, representing the absolute position of the", "then sorted according to distance to the headbox center. Finally, the point that", "agreed to in writing, software # distributed under the License is distributed on", "representing the positions of the cameras. near_clip: Eye-space Z position of the near", "a list of 3 floats), representing the positions of the generated cameras. 
Raises:", "w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given face of a cube", "0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name", "list of floats representing the lower bounds of the box. box_max: A list", "two. image_size: Resolution of the output images in pixels. near_clip: Eye-space Z position", "rig and JSON manifest for Seurat. Args: headbox_min: List of three floats representing", "a placeholder for a string (face name) and an integer (view group number).", "depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view groups for the JSON", "clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create view object and add it", "point[col] # point.w = 1.0 implicitly result_hom[row] += matrix[4 * row + 3]", "1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat:", "the first point. point_b: A list of numbers representing the second point. Returns:", "to generate. Must be a power of two. image_size: Resolution of the output", "'bottom': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0,", "far clipping planes. This method also adjusts the Maya timeline to exactly contain", "3] = position[i] # Create camera object camera = { 'image_width': image_size, 'image_height':", "computed in this base (integer). Returns: The radical inverse as a float in", "-1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'left':", "/ (near - far) # pylint: disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0,", "of the headbox as a list of 3 floats. headbox_max: The upper bounds", "1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "delta_sqr: distance_sqr += element return math.sqrt(distance_sqr) def RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera", "from the reference camera. 
Args: headbox_min: The lower bounds of the headbox as", "+ 3] = position[i] # Create camera object camera = { 'image_width': image_size,", "is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX',", "can have an aribtrary number of dimensions. Args: point_a: A list of numbers", "def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given face of a cube map.", "# Set translation component of world-from-eye matrix. for i in xrange(3): world_from_eye_matrix[4 *", "to in writing, software # distributed under the License is distributed on an", "floats representing the upper bounds of the headbox in world-space. num_view_groups: Number of", "name of a cube map face. \"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation", "implied. # See the License for the specific language governing permissions and #", "box. \"\"\" delta = map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample) position", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "/ float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim] =", "that their bounding box is the unit cube. for dim in xrange(3): sample[dim]", "maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif", "center if a single camera position is requested. return [PointInBox(headbox_min, headbox_max, [0.5, 0.5,", "math import operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a 4x4", "\"\"\"Generates camera positions in a headbox. Camera posittions are computed as a 3D", "for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should", "File name pattern for depth images. 
Must contain a placeholder for a string", "include a view from the reference camera. Args: headbox_min: The lower bounds of", "image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create view object and", "near: Eye-space Z position of the near clipping plane. far: Eye-space Z position", "0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'right': return [", "range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf);", "return [ 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0,", "view groups for the JSON output. Args: headbox_center: Center of the headbox as", "... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain a", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "specific language governing permissions and # limitations under the License. \"\"\"Generates a JSON", "[ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in xrange(3):", "dim in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions", "# Use a 3D Hammersley point set for the samples. sample = [", "disable elif face_name is 'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0,", "of the cube map face. Raises: ValueError: face is not a valid cube", "generated cameras. Raises: ValueError: num_cameras is not positive. \"\"\" if num_cameras <= 0:", "* row + col] * point[col] # point.w = 1.0 implicitly result_hom[row] +=", "a Maya camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5,", "samples so that their bounding box is the unit cube. for dim in", "# See the License for the specific language governing permissions and # limitations", "a 3D Hammersley point set for the samples. sample = [ i /", "\"\"\"Creates and animates the Maya cameras for the rig. 
Six cameras, one for", "distance to the headbox center. Finally, the point that is closest to the", "clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation component of world-from-eye", "boolean value. If true, the Maya camera generation step is bypassed. \"\"\" #", "= reversed_digits * base + digit base_n *= base a = next_a #", "which the radical inverse is computed. base: The radical inverse is computed in", "camera_positions: A list of 3D points (each a list of 3 floats) representing", "# defined in the environment where the linter runs. # # pylint: disable=undefined-variable", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90)", "you may not use this file except in compliance with the License. #", "= result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix", "pyformat: disable elif face_name is 'left': return [ 0.0, 0.0, 1.0, 0.0, 0.0,", "face_name is 'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0,", "sample in the box. \"\"\" delta = map(operator.sub, box_max, box_min) offset = map(operator.mul,", "A 3D point represented as a list of 3 floats. Returns: The projected", "Center of the headbox as a list of 3 floats. camera_positions: Positions of", "Path to the output JSON manifest file. json_only: A boolean value. If true,", "at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2])", "a 4x4 matrix. Args: matrix: A 4x4 matrix represented as a list of", "clipping planes. 
This method also adjusts the Maya timeline to exactly contain the", "face_name is 'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0,", "w = result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye", "\"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr =", "- bottom) e = (near + far) / (near - far) f =", "* near) / (top - bottom) c = (right + left) / (right", "one for each cube face, are generated. Each camera is configured with a", "arbitrary number of dimensions. Args: box_min: A list of floats representing the lower", "by the headbox center itself to include a view from the reference camera.", "the name of a cube map face. \"\"\" # pylint: disable=bad-whitespace # pylint:", "base b. while a > 0: next_a = a / base digit =", "= (top + bottom) / (top - bottom) e = (near + far)", "camera position is requested. return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])] samples = []", "num_cameras <= 0: raise ValueError('num_cameras must be positive') if num_cameras == 1: #", "Maya timeline to exactly contain the frames for the rig animation. Each of", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "view = { 'projective_camera': camera, 'depth_image_file': { 'color': { 'path': color_image_path, 'channel_0': 'R',", "'top.9999.exr'. Returns: A dictionary representing the view groups. \"\"\" view_groups = [] for", "of the near clipping plane. far: Eye-space Z position of the far clipping", "in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name", "of the headbox as a list of 3 floats. camera_positions: Positions of the", "Args: face_name: Name of the face. Must be one of 'front', 'back', 'left',", "1 # Compute the reversed digits, base b. 
while a > 0: next_a", "maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif", "samples: # Normalize the samples so that their bounding box is the unit", "absolute_position in enumerate(camera_positions): views = [] for face in ['front', 'back', 'left', 'right',", "camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point: Distance(point, headbox_center)) # Replace the point", "and far clipping planes. This method also adjusts the Maya timeline to exactly", "4x4 matrix. Args: matrix: A 4x4 matrix represented as a list of 16", "given face of a cube map. Args: face_name: Name of the face. Must", "- far) f = (2.0 * near * far) / (near - far)", "by b^n. return min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min, box_max, sample): \"\"\"Computes a", "rig. Six cameras, one for each cube face, are generated. Each camera is", "= 0.0 for element in delta_sqr: distance_sqr += element return math.sqrt(distance_sqr) def RotateCamera(camera_name,", "floats. headbox_max: The upper bounds of the headbox as a list of 3", "given near and far clipping planes. This method also adjusts the Maya timeline", "headbox in world-space. num_view_groups: Number of view groups (camera positions) to generate. Must", "closest to the headbox center is replaced by the headbox center itself to", "the far clipping planes. \"\"\" # Disable the undefined-variable lint error, because the", "numbers representing the second point. Returns: The euclidean distance as a float. \"\"\"", "world-from-eye matrix for the given face of a cube map. Args: face_name: Name", "num_cameras == 1: # Use the headbox center if a single camera position", "the JSON output. Args: headbox_center: Center of the headbox as a list of", "'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create view object and add it to", "json_file_path: Path to the output JSON manifest file. 
json_only: A boolean value. If", "linter runs. # # pylint: disable=undefined-variable if face_name is 'front': pass elif face_name", "\"\"\" if near <= 0.0: raise ValueError('near must be positive.') if far <=", "base_n = 1 # Compute the reversed digits, base b. while a >", "A list of floats, representing the absolute position of the sample in the", "CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90 degree FOV projection matrix. The created matrix", "is 'left': return [ 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0,", "maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set", "in this base (integer). Returns: The radical inverse as a float in the", "} } views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return the view_groups as", "power of two. image_size: Resolution of the output images in pixels. near_clip: Eye-space", "to include a view from the reference camera. Args: headbox_min: The lower bounds", "matrix for the given face of a cube map. Args: face_name: Name of", "return position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two points. The", "given face as a list in row-major order. Raises: ValueError: face_name is not", "must be positive') if num_cameras == 1: # Use the headbox center if", "exactly contain the frames for the rig animation. 
Each of the six cameras", "if face_name is 'front': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,", "'back', 'left', 'right', 'bottom', 'top']: # Create a cube face camera and rotate", "face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name +", "'.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid", "+ '.rotateX', 90) else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera", "a 3D point using a 4x4 matrix. Args: matrix: A 4x4 matrix represented", "= { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type }", "the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of", "import operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a 4x4 matrix.", "the cameras as a list of 3D points (each a list of 3", "1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat:", "power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import", "true, the Maya camera generation step is bypassed. \"\"\" # Compute the positions", "Create view object and add it to the view groups color_image_path = (color_file_path_pattern", "def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90 degree FOV projection matrix. The created", "to look at a given cube map face. Args: camera_name: Name of the", "face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name +", "a = (2.0 * near) / (right - left) b = (2.0 *", "box is the unit cube. 
for dim in xrange(3): sample[dim] /= max_sample[dim] position", "list of 16 floats. point: A 3D point represented as a list of", "If true, the Maya camera generation step is bypassed. \"\"\" # Compute the", "result_hom[row] += matrix[4 * row + 3] w = result_hom[3] return map(operator.div, result_hom[0:3],", "a list of 3 floats. num_cameras: The number of cameras to generate. Should", "(view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'.", "0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0,", "base): \"\"\"Computes the radical inverse of |a| in base |base|. Args: a: The", "disable=undefined-variable start_time = 0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time,", "[ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0,", "near) / (right - left) b = (2.0 * near) / (top -", "# pyformat: disable elif face_name is 'left': return [ 0.0, 0.0, 1.0, 0.0,", "3] w = result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates", "the unit cube. for dim in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min,", "of |a| in base |base|. Args: a: The integer number for which the", "Generate the JSON manifest and write it to the file. view_groups = CreateViewGroups(headbox_center,", "the samples. sample = [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ]", "not # defined in the environment where the linter runs. # # pylint:", "the six cameras will get one keyframe per camera position. Args: camera_positions: A", "the relative sample position in the box. Returns: A list of floats, representing", "box_max, box_min) offset = map(operator.mul, delta, sample) position = map(operator.add, box_min, offset) return", "next_a # Only when done are the reversed digits divided by b^n. 
def RadicalInverse(a, base):
  """Computes the radical inverse of |a| in base |base|.

  Args:
    a: The integer number for which the radical inverse is computed.
    base: The radical inverse is computed in this base (integer).

  Returns:
    The radical inverse as a float in the range [0.0, 1.0).
  """
  reversed_digits = 0
  base_n = 1
  # Compute the reversed digits, base b. Floor division (//) is required:
  # plain / is float division on Python 3, which would corrupt the digit
  # extraction; // is identical to / for ints on Python 2.
  while a > 0:
    next_a = a // base
    digit = a - next_a * base
    reversed_digits = reversed_digits * base + digit
    base_n *= base
    a = next_a
  # Only when done are the reversed digits divided by b^n.
  return min(reversed_digits / float(base_n), 1.0)
def RotateCamera(camera_name, face_name):
  """Rotates a Maya camera node to look at a given cube map face.

  Args:
    camera_name: Name of the Maya camera's transform node.
    face_name: Name of the cube map face.

  Raises:
    ValueError: face is not a valid cube map face name.
  """
  # Disable the undefined-variable lint error, because the Maya package is not
  # defined in the environment where the linter runs.
  #
  # pylint: disable=undefined-variable
  #
  # String equality (==) is used instead of identity (is): comparing strings
  # with 'is' relies on interning and is implementation-dependent.
  if face_name == 'front':
    # Front is the default camera orientation; no rotation required.
    pass
  elif face_name == 'back':
    maya.cmds.setAttr(camera_name + '.rotateY', 180)
  elif face_name == 'left':
    maya.cmds.setAttr(camera_name + '.rotateY', 90)
  elif face_name == 'right':
    maya.cmds.setAttr(camera_name + '.rotateY', -90)
  elif face_name == 'bottom':
    maya.cmds.setAttr(camera_name + '.rotateX', -90)
  elif face_name == 'top':
    maya.cmds.setAttr(camera_name + '.rotateX', 90)
  else:
    raise ValueError('Invalid face_name')
\"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if", "near a = (2.0 * near) / (right - left) b = (2.0", "maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name,", "writing, software # distributed under the License is distributed on an \"AS-IS\" BASIS,", "in xrange(num_cameras): # Use a 3D Hammersley point set for the samples. sample", "headbox. Camera posittions are computed as a 3D Hammersley point set. The points", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "numbers representing the first point. point_b: A list of numbers representing the second", "the headbox as a list of 3 floats. camera_positions: Positions of the cameras", "CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5,", "for a string (face name) and an integer (view group number). Example: '%s.%04d.exr'", "0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] #", "face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name +", "CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups},", "maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else:", "headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for sample in", "sample): \"\"\"Computes a sample point inside a box with arbitrary number of dimensions.", "names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. 
depth_file_path_pattern: File name pattern for depth images.", "headbox_max: The upper bounds of the headbox as a list of 3 floats.", "is configured with a square viewport and the given near and far clipping", "the given near and far clipping planes. This method also adjusts the Maya", "a power of two. Returns: A list of 3D points (each a list", "for which the radical inverse is computed. base: The radical inverse is computed", "world_from_eye_matrix[4 * i + 3] = position[i] # Create camera object camera =", "the upper bounds of the box. sample: A list of floats in the", "generated. Each camera is configured with a square viewport and the given near", "disable def RadicalInverse(a, base): \"\"\"Computes the radical inverse of |a| in base |base|.", "runs. # # pylint: disable=undefined-variable start_time = 0 end_time = len(camera_positions) - 1", "'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output JSON manifest file. json_only:", "lint error, because the Maya package is not # defined in the environment", "A list of floats representing the lower bounds of the box. box_max: A", "view_group = {'views': views} view_groups.append(view_group) # Return the view_groups as a Python list.", "dimensions. Args: box_min: A list of floats representing the lower bounds of the", "of 3 floats. Returns: The projected point, represented as a list of 3", "Create camera object camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix':", "positions on this camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index,", "PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point: Distance(point, headbox_center)) #", "manifest and a Maya camera rig for Seurat. 
def CreateRig(headbox_min,
              headbox_max,
              num_view_groups,
              image_size,
              near_clip,
              far_clip,
              depth_type,
              depth_channel_name,
              color_file_path_pattern,
              depth_file_path_pattern,
              json_file_path,
              json_only=False):
  """Creates a Maya camera rig and JSON manifest for Seurat.

  Args:
    headbox_min: List of three floats representing the lower bounds of the
      headbox in world-space.
    headbox_max: List of three floats representing the upper bounds of the
      headbox in world-space.
    num_view_groups: Number of view groups (camera positions) to generate.
      Must be a power of two.
    image_size: Resolution of the output images in pixels.
    near_clip: Eye-space Z position of the near clipping planes.
    far_clip: Eye-space Z position of the far clipping planes.
    depth_type: A string representing the depth encoding. Valid values are:
      'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z'
      (eye-space Z in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH'
      (distance to eye).
    depth_channel_name: Name of the depth channel in the output file. Commonly
      used values are 'R' (VRay) and 'A' (Arnold).
    color_file_path_pattern: File name pattern for color images. Must contain
      a placeholder for a string (face name) and an integer (view group
      number). Example: '%s.%04d.exr' for file names 'front.0000.exr',
      'front.0001.exr', ... , 'top.9999.exr'.
    depth_file_path_pattern: File name pattern for depth images. Must contain
      a placeholder for a string (face name) and an integer (view group
      number). Example: '%s.%04d.exr' for file names 'front.0000.exr',
      'front.0001.exr', ... , 'top.9999.exr'.
    json_file_path: Path to the output JSON manifest file.
    json_only: A boolean; if true, the Maya camera generation step is bypassed.

  Example usage:
    CreateRig(headbox_min=[-0.5, -0.5, -0.5],
              headbox_max=[0.5, 0.5, 0.5],
              num_view_groups=16,  # Should be a power of two.
              image_size=1024,
              near_clip=0.1,
              far_clip=100.0,
              depth_type='EYE_Z',
              depth_channel_name='A',
              color_file_path_pattern='%s_color.%04d.exr',
              depth_file_path_pattern='%s_depth.%04d.exr',
              json_file_path='./manifest.json')
  """
  # Sample the headbox to obtain one position per view group.
  positions = GenerateCameraPositions(headbox_min, headbox_max,
                                      num_view_groups)
  # Build and keyframe the six Maya cameras, unless only the manifest is
  # wanted.
  if not json_only:
    CreateCameras(positions, near_clip, far_clip)
  # The headbox center serves as the reference point for all views.
  center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
  # Assemble the manifest and write it out as JSON.
  groups = CreateViewGroups(center, positions, image_size, near_clip,
                            far_clip, depth_type, depth_channel_name,
                            color_file_path_pattern, depth_file_path_pattern)
  manifest_text = json.dumps({'view_groups': groups}, indent=2)
  with open(json_file_path, 'w') as manifest_file:
    manifest_file.write(manifest_text)
def CreateCameras(camera_positions, near_clip, far_clip):
  """Creates and animates the Maya cameras for the rig.

  Six cameras, one for each cube face, are generated. Each camera is
  configured with a square viewport and the given near and far clipping
  planes. This method also adjusts the Maya timeline to exactly contain the
  frames for the rig animation. Each of the six cameras will get one keyframe
  per camera position.

  Args:
    camera_positions: A list of 3D points (each a list of 3 floats)
      representing the positions of the cameras.
    near_clip: Eye-space Z position of the near clipping planes.
    far_clip: Eye-space Z position of the far clipping planes.
  """
  # Disable the undefined-variable lint error, because the Maya package is not
  # defined in the environment where the linter runs.
  #
  # pylint: disable=undefined-variable
  #
  # Fit the timeline to the animation: one frame per camera position.
  first_frame = 0
  last_frame = len(camera_positions) - 1
  maya.cmds.playbackOptions(
      animationStartTime=first_frame,
      animationEndTime=last_frame,
      minTime=first_frame,
      maxTime=last_frame)
  for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:
    # Create a cube face camera and rotate it.
    camera_name = maya.cmds.camera(
        name='seurat_' + face,
        focalLength=12.7,
        horizontalFilmAperture=1,
        verticalFilmAperture=1,
        nearClipPlane=near_clip,
        farClipPlane=far_clip)[0]
    RotateCamera(camera_name, face)
    # Set translation keyframes for all positions on this camera.
    for frame, position in enumerate(camera_positions):
      for axis, value in zip(('X', 'Y', 'Z'), position):
        maya.cmds.setKeyframe(
            camera_name, at='translate' + axis, t=frame, v=value)
def CubeFaceProjectionMatrix(near, far):
  """Creates a cube-face 90 degree FOV projection matrix.

  The created matrix is an OpenGL-style projection matrix.

  Args:
    near: Eye-space Z position of the near clipping plane.
    far: Eye-space Z position of the far clipping plane.

  Returns:
    The projection matrix as a list in row-major order.

  Raises:
    ValueError: Invalid clip planes. near <= 0.0 or far <= near.
  """
  if near <= 0.0:
    raise ValueError('near must be positive.')
  if far <= near:
    raise ValueError('far must be greater than near.')
  # A 90 degree FOV cube face spans a symmetric frustum: the near plane
  # extends from -near to +near in both x and y.
  x_min, x_max = -near, near
  y_min, y_max = -near, near
  scale_x = (2.0 * near) / (x_max - x_min)
  scale_y = (2.0 * near) / (y_max - y_min)
  offset_x = (x_max + x_min) / (x_max - x_min)
  offset_y = (y_max + y_min) / (y_max - y_min)
  depth_scale = (near + far) / (near - far)
  depth_offset = (2.0 * near * far) / (near - far)
  # pylint: disable=bad-whitespace
  return [scale_x, 0.0,     offset_x,    0.0,
          0.0,     scale_y, offset_y,    0.0,
          0.0,     0.0,     depth_scale, depth_offset,
          0.0,     0.0,     -1.0,        0.0]
\"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta,", "next_a * base reversed_digits = reversed_digits * base + digit base_n *= base", "0.5]) camera_positions = [] for sample in samples: # Normalize the samples so", "set. The points are transformed such that their bounding box is exactly equal", "file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth", "for view_group_index, absolute_position in enumerate(camera_positions): views = [] for face in ['front', 'back',", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'right':", "Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to", "Name of the cube map face. Raises: ValueError: face is not a valid", "a: The integer number for which the radical inverse is computed. base: The", "# pyformat: disable elif face_name is 'back': return [-1.0, 0.0, 0.0, 0.0, 0.0,", "point_b: A list of numbers representing the second point. Returns: The euclidean distance", "timeline to exactly contain the frames for the rig animation. Each of the", "(right + left) / (right - left) d = (top + bottom) /", "Valid values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z'", "num_cameras): \"\"\"Generates camera positions in a headbox. Camera posittions are computed as a", "Args: point_a: A list of numbers representing the first point. point_b: A list", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable else: raise", "six Maya cameras and keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip)", "a list of 3 floats. camera_positions: Positions of the cameras as a list", "view_groups.append(view_group) # Return the view_groups as a Python list. return view_groups def CreateRig(headbox_min,", "near and far clipping planes. 
This method also adjusts the Maya timeline to", "of world-from-eye matrix. for i in xrange(3): world_from_eye_matrix[4 * i + 3] =", "the positions of the cameras. near_clip: Eye-space Z position of the near clipping", "euclidean distance between two points. The points can have an aribtrary number of", "3 floats) representing the positions of the cameras. near_clip: Eye-space Z position of", "for each cube face, are generated. Each camera is configured with a square", "3D Hammersley point set. The points are transformed such that their bounding box", "[PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])] samples = [] max_sample = [0.0, 0.0, 0.0]", "(the \"License\"); # you may not use this file except in compliance with", "view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index,", "3 floats. camera_positions: Positions of the cameras as a list of 3D points", "to headbox center. position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix", "floats), representing the positions of the generated cameras. Raises: ValueError: num_cameras is not", "# Unless required by applicable law or agreed to in writing, software #", "image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math", "by applicable law or agreed to in writing, software # distributed under the", "base (integer). Returns: The radical inverse as a float in the range [0.0,", "of the depth channel in the output file. Commonly used values are 'R'", "the headbox center # itself. 
def GenerateCameraPositions(headbox_min, headbox_max, num_cameras):
  """Generates camera positions in a headbox.

  Camera positions are computed as a 3D Hammersley point set. The points are
  transformed such that their bounding box is exactly equal to the headbox.
  The points are then sorted according to distance to the headbox center.
  Finally, the point that is closest to the headbox center is replaced by the
  headbox center itself to include a view from the reference camera.

  Args:
    headbox_min: The lower bounds of the headbox as a list of 3 floats.
    headbox_max: The upper bounds of the headbox as a list of 3 floats.
    num_cameras: The number of cameras to generate. Should be a power of two.

  Returns:
    A list of 3D points (each a list of 3 floats), representing the positions
    of the generated cameras.

  Raises:
    ValueError: num_cameras is not positive.
  """
  if num_cameras <= 0:
    raise ValueError('num_cameras must be positive')
  if num_cameras == 1:
    # Use the headbox center if a single camera position is requested.
    return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])]
  samples = []
  max_sample = [0.0, 0.0, 0.0]
  for i in range(num_cameras):
    # Use a 3D Hammersley point set for the samples.
    sample = [
        i / float(num_cameras),
        RadicalInverse(i, 2),
        RadicalInverse(i, 3)
    ]
    for dim in range(3):
      max_sample[dim] = max(max_sample[dim], sample[dim])
    samples.append(sample)
  headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
  camera_positions = []
  for sample in samples:
    # Normalize the samples so that their bounding box is the unit cube.
    # max_sample is strictly positive in every dimension for num_cameras >= 2.
    for dim in range(3):
      sample[dim] /= max_sample[dim]
    position = PointInBox(headbox_min, headbox_max, sample)
    camera_positions.append(position)
  sorted_positions = sorted(
      camera_positions, key=lambda point: Distance(point, headbox_center))
  # Replace the point closest to the headbox center by the headbox center
  # itself.
  sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])
  return sorted_positions
\"\"\" view_groups = [] for view_group_index, absolute_position in enumerate(camera_positions): views = []", "of the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six Maya", "else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a", "far <= near. \"\"\" if near <= 0.0: raise ValueError('near must be positive.')", "and JSON manifest for Seurat. Args: headbox_min: List of three floats representing the", "* far) / (near - far) # pylint: disable=bad-whitespace return [a, 0.0, c,", "the headbox as a list of 3 floats. headbox_max: The upper bounds of", "* row + 3] w = result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w])", "in the box. \"\"\" delta = map(operator.sub, box_max, box_min) offset = map(operator.mul, delta,", "disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90 degree", "is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name", "radical inverse is computed. base: The radical inverse is computed in this base", "limitations under the License. \"\"\"Generates a JSON manifest and a Maya camera rig", "view_groups as a Python list. return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip,", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'bottom':", "[ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,", "(top - bottom) c = (right + left) / (right - left) d", "A list of numbers representing the second point. Returns: The euclidean distance as", "represented as a list of 16 floats. point: A 3D point represented as", "cube map face. Args: camera_name: Name of the Maya camera's transform node. face_name:", "i in xrange(3): world_from_eye_matrix[4 * i + 3] = position[i] # Create camera", "file. 
Commonly used values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name", "3D points (each a list of 3 floats). image_size: Size of the output", "0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable", "Maya package is not # defined in the environment where the linter runs.", "0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'top': return [", "distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth channel in", "the samples so that their bounding box is the unit cube. for dim", "0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya", "for dim in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max,", "far) f = (2.0 * near * far) / (near - far) #", "4x4 matrix represented as a list of 16 floats. point: A 3D point", "return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern,", "name. \"\"\" # Disable the undefined-variable lint error, because the Maya package is", "cube map. Args: face_name: Name of the face. Must be one of 'front',", "points (each a list of 3 floats). image_size: Size of the output images", "+ '.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise", "'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth': {", "of floats, representing the absolute position of the sample in the box. \"\"\"", "for the given face of a cube map. Args: face_name: Name of the", "Maya camera node to look at a given cube map face. 
Args: camera_name:", "left = -near right = near bottom = -near top = near a", "the positions of the generated cameras. Raises: ValueError: num_cameras is not positive. \"\"\"", "in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range", "f = (2.0 * near * far) / (near - far) # pylint:", "return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya cameras for", "map face name. \"\"\" # Disable the undefined-variable lint error, because the Maya", "the linter runs. # # pylint: disable=undefined-variable if face_name is 'front': pass elif", "the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six Maya cameras", "+ far) / (near - far) f = (2.0 * near * far)", "RotateCamera(camera_name, face) # Set translation keyframes for all positions on this camera. for", "bounds of the box. sample: A list of floats in the range [0.0,", "bottom) e = (near + far) / (near - far) f = (2.0", "and add it to the view groups color_image_path = (color_file_path_pattern % (face, view_group_index))", "number for which the radical inverse is computed. base: The radical inverse is", "'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90)", "def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox. Camera posittions are", "the specific language governing permissions and # limitations under the License. \"\"\"Generates a", "0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "floats. num_cameras: The number of cameras to generate. Should be a power of", "center by the headbox center # itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5,", "in the environment where the linter runs. # # pylint: disable=undefined-variable start_time =", "face camera and rotate it. 
camera_name = maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1,", "delta, sample) position = map(operator.add, box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes", "list of 3 floats. camera_positions: Positions of the cameras as a list of", "of 3 floats. \"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for row in", "disable=undefined-variable if face_name is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY',", "'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the given face as", "'A' }, 'depth': { 'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min, box_max, sample): \"\"\"Computes a sample point inside", "PointInBox(box_min, box_max, sample): \"\"\"Computes a sample point inside a box with arbitrary number", "face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox. Camera posittions", "integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... ,", "component of world-from-eye matrix. 
for i in xrange(3): world_from_eye_matrix[4 * i + 3]", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid face_name')", "(right - left) d = (top + bottom) / (top - bottom) e", "key=lambda point: Distance(point, headbox_center)) # Replace the point closest to the headbox center", "(near - far) # pylint: disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0, b,", "depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera rig and JSON", "for i in xrange(num_cameras): # Use a 3D Hammersley point set for the", "of the cameras as a list of 3D points (each a list of", "the generated cameras. Raises: ValueError: num_cameras is not positive. \"\"\" if num_cameras <=", "ValueError('near must be positive.') if far <= near: raise ValueError('far must be greater", "a = next_a # Only when done are the reversed digits divided by", "headbox center. position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix =", "the upper bounds of the headbox in world-space. num_view_groups: Number of view groups", "list of 3 floats. Returns: The projected point, represented as a list of", "ValueError: Invalid clip planes. near <= 0.0 or far <= near. \"\"\" if", "groups color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index))", "runs. # # pylint: disable=undefined-variable if face_name is 'front': pass elif face_name is", "in writing, software # distributed under the License is distributed on an \"AS-IS\"", "is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY',", "pylint: disable=undefined-variable if face_name is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name +", "bypassed. 
\"\"\" # Compute the positions of the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max,", "the depth channel in the output file. Commonly used values are 'R' (VRay)", "if far <= near: raise ValueError('far must be greater than near.') left =", "180) elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right':", "[] max_sample = [0.0, 0.0, 0.0] for i in xrange(num_cameras): # Use a", "'channel_0': depth_channel_name } } } views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return", "(near + far) / (near - far) f = (2.0 * near *", "as a list of 3D points (each a list of 3 floats). image_size:", "'channel_alpha': 'A' }, 'depth': { 'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view)", "plane. far: Eye-space Z position of the far clipping plane. Returns: The clip-from-eye", "language governing permissions and # limitations under the License. \"\"\"Generates a JSON manifest", "a square viewport and the given near and far clipping planes. This method", "\"\"\"Computes the euclidean distance between two points. The points can have an aribtrary", "maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ',", "near) / (top - bottom) c = (right + left) / (right -", "point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr = 0.0 for element in delta_sqr:", "generation step is bypassed. \"\"\" # Compute the positions of the cameras. camera_positions", "single camera position is requested. return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])] samples =", "JSON manifest for Seurat. 
Args: headbox_min: List of three floats representing the lower", "views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return the view_groups as a Python", "Args: near: Eye-space Z position of the near clipping plane. far: Eye-space Z", "pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the radical inverse of |a| in base", "matrix. for i in xrange(3): world_from_eye_matrix[4 * i + 3] = position[i] #", "map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation", "] for dim in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min,", "num_cameras is not positive. \"\"\" if num_cameras <= 0: raise ValueError('num_cameras must be", "'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH'", "(Arnold). color_file_path_pattern: File name pattern for color images. Must contain a placeholder for", "[0.0, 1.0). \"\"\" reversed_digits = 0 base_n = 1 # Compute the reversed", "list of 3 floats. \"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for row", "absolute position of the sample in the box. \"\"\" delta = map(operator.sub, box_max,", "returns the view groups for the JSON output. Args: headbox_center: Center of the", "bounds of the headbox as a list of 3 floats. num_cameras: The number", "element in delta_sqr: distance_sqr += element return math.sqrt(distance_sqr) def RotateCamera(camera_name, face_name): \"\"\"Rotates a", "on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "} # Create view object and add it to the view groups color_image_path", "animates the Maya cameras for the rig. Six cameras, one for each cube", "the License for the specific language governing permissions and # limitations under the", "output images in pixels. 
near_clip: Eye-space Z position of the near clipping planes.", "with arbitrary number of dimensions. Args: box_min: A list of floats representing the", "box is exactly equal to the headbox. The points are then sorted according", "num_view_groups) # Generate the six Maya cameras and keyframe their positions. if not", "sorted( camera_positions, key=lambda point: Distance(point, headbox_center)) # Replace the point closest to the", "represented as a list of 3 floats. Returns: The projected point, represented as", "Returns: The world-from-eye matrix for the given face as a list in row-major", "(face name) and an integer (view group number). Example: '%s.%04d.exr' for file names", "Disable the undefined-variable lint error, because the Maya package is not # defined", "'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90)", "}, 'depth': { 'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group =", "\"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front': return [", "'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90)", "point. Returns: The euclidean distance as a float. \"\"\" delta = map(operator.sub, point_a,", "return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0,", "reference camera. Args: headbox_min: The lower bounds of the headbox as a list", "= [] for face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera", "write it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type,", "'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth channel in the output", "of the box. box_max: A list of floats representing the upper bounds of", "valid cube map face name. 
\"\"\" # Disable the undefined-variable lint error, because", "far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2) with open(json_file_path, 'w')", "for color images. Must contain a placeholder for a string (face name) and", "are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space", "a cube map. Args: face_name: Name of the face. Must be one of", "names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output JSON manifest", "Positions of the cameras as a list of 3D points (each a list", "Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate", "map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr = 0.0 for element", "\"\"\"Rotates a Maya camera node to look at a given cube map face.", "the JSON manifest and write it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "in row-major order. Raises: ValueError: face_name is not the name of a cube", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif", "The integer number for which the radical inverse is computed. base: The radical", "# Compute the positions of the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) #", "object camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type':", "the headbox center. Finally, the point that is closest to the headbox center", "- left) b = (2.0 * near) / (top - bottom) c =", "Maya camera generation step is bypassed. 
\"\"\" # Compute the positions of the", "the point that is closest to the headbox center is replaced by the", "def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns", "# # pylint: disable=undefined-variable if face_name is 'front': pass elif face_name is 'back':", "floats. camera_positions: Positions of the cameras as a list of 3D points (each", "b, d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0] # pyformat:", "far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation component of world-from-eye matrix. for i", "elif face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid face_name') def", "in world-space. headbox_max: List of three floats representing the upper bounds of the", "in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera position relative to headbox", "# pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front': return [ 1.0,", "of numbers representing the second point. Returns: The euclidean distance as a float.", "camera_positions, key=lambda point: Distance(point, headbox_center)) # Replace the point closest to the headbox", "face as a list in row-major order. Raises: ValueError: face_name is not the", "Args: camera_name: Name of the Maya camera's transform node. face_name: Name of the", "Set translation keyframes for all positions on this camera. for view_group_index, position in", "headbox center itself to include a view from the reference camera. Args: headbox_min:", "representing the positions of the generated cameras. Raises: ValueError: num_cameras is not positive.", "xrange(4): for col in xrange(3): result_hom[row] += matrix[4 * row + col] *", "float in the range [0.0, 1.0). \"\"\" reversed_digits = 0 base_n = 1", "using a 4x4 matrix. 
Args: matrix: A 4x4 matrix represented as a list", "= (2.0 * near) / (top - bottom) c = (right + left)", "The created matrix is an OpenGL-style projection matrix. Args: near: Eye-space Z position", "json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max,", "of two. image_size: Resolution of the output images in pixels. near_clip: Eye-space Z", "{ 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } #", "distance as a float. \"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul,", "cameras for the rig. Six cameras, one for each cube face, are generated.", "max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions", "center # itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def", "The upper bounds of the headbox as a list of 3 floats. num_cameras:", "created matrix is an OpenGL-style projection matrix. Args: near: Eye-space Z position of", "camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size,", "of three floats representing the lower bounds of the headbox in world-space. headbox_max:", "configured with a square viewport and the given near and far clipping planes.", "Version 2.0 (the \"License\"); # you may not use this file except in", "the radical inverse of |a| in base |base|. Args: a: The integer number", "a list in row-major order. Raises: ValueError: face_name is not the name of", "images. 
Must contain a placeholder for a string (face name) and an integer", "= [0.0, 0.0, 0.0] for i in xrange(num_cameras): # Use a 3D Hammersley", "the box. box_max: A list of floats representing the upper bounds of the", "is exactly equal to the headbox. The points are then sorted according to", "Raises: ValueError: face_name is not the name of a cube map face. \"\"\"", "* base reversed_digits = reversed_digits * base + digit base_n *= base a", "bottom) c = (right + left) / (right - left) d = (top", "planes. \"\"\" # Disable the undefined-variable lint error, because the Maya package is", "face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name +", "of floats representing the upper bounds of the box. sample: A list of", "farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes for all positions on this camera.", "-near right = near bottom = -near top = near a = (2.0", "depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math import operator def", "posittions are computed as a 3D Hammersley point set. The points are transformed", "box_max: A list of floats representing the upper bounds of the box. sample:", "pattern for depth images. Must contain a placeholder for a string (face name)", "[0.0, 1.0] representing the relative sample position in the box. Returns: A list", "the range [0.0, 1.0] representing the relative sample position in the box. Returns:", "3 floats). image_size: Size of the output images in pixels. near_clip: Eye-space Z", "'.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is", "defined in the environment where the linter runs. # # pylint: disable=undefined-variable if", "the reversed digits divided by b^n. 
return min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min,", "matrix is an OpenGL-style projection matrix. Args: near: Eye-space Z position of the", "headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point: Distance(point, headbox_center)) # Replace", "offset) return position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two points.", "position = map(operator.add, box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes the euclidean", "upper bounds of the headbox in world-space. num_view_groups: Number of view groups (camera", "Z position of the far clipping plane. Returns: The clip-from-eye matrix as a", "The radical inverse is computed in this base (integer). Returns: The radical inverse", "for the specific language governing permissions and # limitations under the License. \"\"\"Generates", "= a - next_a * base reversed_digits = reversed_digits * base + digit", "sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip):", "and write it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip,", "planes. depth_type: A string representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space", "... , 'top.9999.exr'. json_file_path: Path to the output JSON manifest file. json_only: A", "float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim] = max(max_sample[dim],", "in the box. Returns: A list of floats, representing the absolute position of", "the far clipping plane. Returns: The clip-from-eye matrix as a list in row-major", "as a list of 3 floats. 
num_cameras: The number of cameras to generate.", "position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1])", "will get one keyframe per camera position. Args: camera_positions: A list of 3D", "of floats in the range [0.0, 1.0] representing the relative sample position in", "0 base_n = 1 # Compute the reversed digits, base b. while a", "= maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) #", "OpenGL-style projection matrix. Args: near: Eye-space Z position of the near clipping plane.", "'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must contain a placeholder", "0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat:", "get one keyframe per camera position. Args: camera_positions: A list of 3D points", "delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr = 0.0", "if num_cameras <= 0: raise ValueError('num_cameras must be positive') if num_cameras == 1:", "near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z position", "itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip,", "Each of the six cameras will get one keyframe per camera position. Args:", "values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated", "'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth': { 'path': depth_image_path, 'channel_0':", "[0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth", "\"\"\" # Compute the positions of the cameras. 
camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups)", "'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the given face as a", "Must be a power of two. image_size: Resolution of the output images in", "headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power of two. image_size=1024, near_clip=0.1,", "color_file_path_pattern: File name pattern for color images. Must contain a placeholder for a", "Returns: The projected point, represented as a list of 3 floats. \"\"\" result_hom", "Args: a: The integer number for which the radical inverse is computed. base:", "return [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,", "at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip,", "first point. point_b: A list of numbers representing the second point. Returns: The", "Each camera is configured with a square viewport and the given near and", "= -near top = near a = (2.0 * near) / (right -", "object and add it to the view groups color_image_path = (color_file_path_pattern % (face,", "depth images. Must contain a placeholder for a string (face name) and an", "of the sample in the box. \"\"\" delta = map(operator.sub, box_max, box_min) offset", "points (each a list of 3 floats), representing the positions of the generated", "near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math import", "be a power of two. image_size: Resolution of the output images in pixels.", "maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for face in ['front', 'back', 'left', 'right', 'bottom',", "manifest for Seurat. 
Args: headbox_min: List of three floats representing the lower bounds", "Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in", "coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name:", "elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name", "positions in a headbox. Camera posittions are computed as a 3D Hammersley point", "box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two", "t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip,", "point set. The points are transformed such that their bounding box is exactly", "at a given cube map face. Args: camera_name: Name of the Maya camera's", "3 floats. Returns: The projected point, represented as a list of 3 floats.", "of the generated cameras. Raises: ValueError: num_cameras is not positive. \"\"\" if num_cameras", "view_group_index, absolute_position in enumerate(camera_positions): views = [] for face in ['front', 'back', 'left',", "* i + 3] = position[i] # Create camera object camera = {", "is computed in this base (integer). Returns: The radical inverse as a float", "representing the upper bounds of the headbox in world-space. num_view_groups: Number of view", "'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth': { 'path': depth_image_path, 'channel_0': depth_channel_name", "eye). depth_channel_name: Name of the depth channel in the output file. Commonly used", "'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain", "for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary representing the", "OF ANY KIND, either express or implied. 
# See the License for the", "points (each a list of 3 floats) representing the positions of the cameras.", "position. Args: camera_positions: A list of 3D points (each a list of 3", "of the Maya camera's transform node. face_name: Name of the cube map face.", "floats representing the lower bounds of the box. box_max: A list of floats", "and 'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must contain a", "# Compute the reversed digits, base b. while a > 0: next_a =", "point_b): \"\"\"Computes the euclidean distance between two points. The points can have an", "0.0, 1.0] # pyformat: disable elif face_name is 'top': return [ 1.0, 0.0,", "def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a 4x4 matrix. Args: matrix:", "base + digit base_n *= base a = next_a # Only when done", "of 3 floats. headbox_max: The upper bounds of the headbox as a list", "of the headbox as a list of 3 floats. num_cameras: The number of", "of the headbox in world-space. num_view_groups: Number of view groups (camera positions) to", "CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power of", "a float. \"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta, delta)", "headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate the JSON manifest and", "color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view", "Should be a power of two. Returns: A list of 3D points (each", "# Set translation keyframes for all positions on this camera. for view_group_index, position", "eye-space Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to", "(window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate", "as a 3D Hammersley point set. The points are transformed such that their", "positions of the cameras. 
near_clip: Eye-space Z position of the near clipping planes.", "= -near right = near bottom = -near top = near a =", "0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif", "camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2)", "the environment where the linter runs. # # pylint: disable=undefined-variable if face_name is", "all positions on this camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX',", "it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name,", "# pyformat: disable elif face_name is 'right': return [ 0.0, 0.0, -1.0, 0.0,", "1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'back':", "0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a,", "divided by b^n. return min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min, box_max, sample): \"\"\"Computes", "bounds of the headbox as a list of 3 floats. headbox_max: The upper", "face of a cube map. Args: face_name: Name of the face. Must be", "for depth images. Must contain a placeholder for a string (face name) and", "node. face_name: Name of the cube map face. Raises: ValueError: face is not", "/ (top - bottom) c = (right + left) / (right - left)", "c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0,", "= WorldFromEyeMatrixFromFace(face) # Set translation component of world-from-eye matrix. for i in xrange(3):", "headbox. The points are then sorted according to distance to the headbox center.", "0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'bottom': return [", "offset = map(operator.mul, delta, sample) position = map(operator.add, box_min, offset) return position def", "a box with arbitrary number of dimensions. 
Args: box_min: A list of floats", "[ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0,", "= CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups':", "matrix as a list in row-major order. Raises: ValueError: Invalid clip planes. near", "in xrange(4): for col in xrange(3): result_hom[row] += matrix[4 * row + col]", "FOV projection matrix. The created matrix is an OpenGL-style projection matrix. Args: near:", "generate. Must be a power of two. image_size: Resolution of the output images", "3 floats. \"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for row in xrange(4):", "cube face camera and rotate it. camera_name = maya.cmds.camera( name='seurat_' + face, focalLength=12.7,", "in the range [0.0, 1.0). \"\"\" reversed_digits = 0 base_n = 1 #", "0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] #", "a 3D Hammersley point set. The points are transformed such that their bounding", "0.5, 0.5])] samples = [] max_sample = [0.0, 0.0, 0.0] for i in", "a list of 3 floats. headbox_max: The upper bounds of the headbox as", "JSON output. Args: headbox_center: Center of the headbox as a list of 3", "is 'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,", "1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat:", "of the cameras. near_clip: Eye-space Z position of the near clipping planes. far_clip:", "string (face name) and an integer (view group number). Example: '%s.%04d.exr' for file", "headbox_max: List of three floats representing the upper bounds of the headbox in", "point set for the samples. sample = [ i / float(num_cameras), RadicalInverse(i, 2),", "and a Maya camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5,", "in the environment where the linter runs. 
# # pylint: disable=undefined-variable if face_name", "or agreed to in writing, software # distributed under the License is distributed", "= [] for sample in samples: # Normalize the samples so that their", "pylint: disable=bad-continuation if face_name is 'front': return [ 1.0, 0.0, 0.0, 0.0, 0.0,", "in xrange(3): result_hom[row] += matrix[4 * row + col] * point[col] # point.w", "the point closest to the headbox center by the headbox center # itself.", "the headbox in world-space. num_view_groups: Number of view groups (camera positions) to generate.", "(face, view_group_index)) view = { 'projective_camera': camera, 'depth_image_file': { 'color': { 'path': color_image_path,", "'back', 'left', 'right', 'bottom', 'top']: # Camera position relative to headbox center. position", "face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera position relative to", "The clip-from-eye matrix as a list in row-major order. Raises: ValueError: Invalid clip", "= 0 base_n = 1 # Compute the reversed digits, base b. while", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "so that their bounding box is the unit cube. for dim in xrange(3):", "the second point. Returns: The euclidean distance as a float. \"\"\" delta =", "/ (near - far) f = (2.0 * near * far) / (near", "- bottom) c = (right + left) / (right - left) d =", "Name of the face. Must be one of 'front', 'back', 'left', 'right', 'bottom',", "1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid", "camera_positions: Positions of the cameras as a list of 3D points (each a", "License. 
# You may obtain a copy of the License at # #", "[0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf); Arnold's", "<= near: raise ValueError('far must be greater than near.') left = -near right", "disable elif face_name is 'left': return [ 0.0, 0.0, 1.0, 0.0, 0.0, 1.0,", "in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Create a cube face camera", "view groups. \"\"\" view_groups = [] for view_group_index, absolute_position in enumerate(camera_positions): views =", "JSON manifest and a Maya camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5,", "nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes for all positions on this", "list in row-major order. Raises: ValueError: Invalid clip planes. near <= 0.0 or", "|a| in base |base|. Args: a: The integer number for which the radical", "groups for the JSON output. Args: headbox_center: Center of the headbox as a", "position of the far clipping planes. \"\"\" # Disable the undefined-variable lint error,", "color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math import operator def ProjectPoint(matrix, point):", "3D points (each a list of 3 floats), representing the positions of the", "end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for face in", "face. \"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front': return", "plane. Returns: The clip-from-eye matrix as a list in row-major order. Raises: ValueError:", "'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "group number). 
Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern:", "image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera", "0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0,", "1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable", "# pyformat: disable elif face_name is 'top': return [ 1.0, 0.0, 0.0, 0.0,", "Size of the output images in pixels. near_clip: Eye-space Z position of the", "view groups color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face,", "euclidean distance as a float. \"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr =", "generate. Should be a power of two. Returns: A list of 3D points", "'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary representing the view groups. \"\"\" view_groups", "1.0 implicitly result_hom[row] += matrix[4 * row + 3] w = result_hom[3] return", "maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates", "a list of 3D points (each a list of 3 floats). image_size: Size", "a list of 16 floats. point: A 3D point represented as a list", "0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "Raises: ValueError: Invalid clip planes. near <= 0.0 or far <= near. \"\"\"", "is replaced by the headbox center itself to include a view from the", "start_time = 0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time)", "square viewport and the given near and far clipping planes. 
This method also", "pyformat: disable elif face_name is 'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0,", "representing the upper bounds of the box. sample: A list of floats in", "distributed under the License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES", "their bounding box is the unit cube. for dim in xrange(3): sample[dim] /=", "num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya", "\"\"\"Creates a cube-face 90 degree FOV projection matrix. The created matrix is an", "= (color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view =", "or far <= near. \"\"\" if near <= 0.0: raise ValueError('near must be", "samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for sample", "the Maya cameras for the rig. Six cameras, one for each cube face,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "two. Returns: A list of 3D points (each a list of 3 floats),", "# Create camera object camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix,", "Hammersley point set for the samples. sample = [ i / float(num_cameras), RadicalInverse(i,", "json_file_path, json_only=False): \"\"\"Creates a Maya camera rig and JSON manifest for Seurat. Args:", "= next_a # Only when done are the reversed digits divided by b^n.", "matrix. Args: near: Eye-space Z position of the near clipping plane. far: Eye-space", "coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the", "0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'right': return [ 0.0,", "1.0] # pyformat: disable elif face_name is 'left': return [ 0.0, 0.0, 1.0,", "point closest to the headbox center by the headbox center # itself. 
sorted_positions[0]", "near clipping planes. far_clip: Eye-space Z position of the far clipping planes. \"\"\"", "GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six Maya cameras and keyframe their positions.", "= map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set", "Hammersley point set. The points are transformed such that their bounding box is", "undefined-variable lint error, because the Maya package is not # defined in the", "upper bounds of the box. sample: A list of floats in the range", "keyframes for all positions on this camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe(", "camera position. Args: camera_positions: A list of 3D points (each a list of", "raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox.", "dimensions. Args: point_a: A list of numbers representing the first point. point_b: A", "(integer). Returns: The radical inverse as a float in the range [0.0, 1.0).", "positive. \"\"\" if num_cameras <= 0: raise ValueError('num_cameras must be positive') if num_cameras", "# pylint: disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0, b, d, 0.0, 0.0,", "center itself to include a view from the reference camera. Args: headbox_min: The", "base: The radical inverse is computed in this base (integer). Returns: The radical", "camera_name = maya.cmds.camera( name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face)", "pyformat: disable elif face_name is 'back': return [-1.0, 0.0, 0.0, 0.0, 0.0, 1.0,", "A list of floats representing the upper bounds of the box. 
sample: A", "is 'bottom': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,", "in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5,", "Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name", "center is replaced by the headbox center itself to include a view from", "the Maya package is not # defined in the environment where the linter", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif face_name is", "elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name", "depth encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0,", "between two points. The points can have an aribtrary number of dimensions. Args:", "and keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the", "with a square viewport and the given near and far clipping planes. This", "... , 'top.9999.exr'. Returns: A dictionary representing the view groups. \"\"\" view_groups =", "Eye-space Z position of the near clipping plane. far: Eye-space Z position of", "def RadicalInverse(a, base): \"\"\"Computes the radical inverse of |a| in base |base|. Args:", "'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group = {'views': views} view_groups.append(view_group)", "digit = a - next_a * base reversed_digits = reversed_digits * base +", "digit base_n *= base a = next_a # Only when done are the", "Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version", "in world-space. num_view_groups: Number of view groups (camera positions) to generate. 
Must be", "Name of the Maya camera's transform node. face_name: Name of the cube map", "camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index,", "name pattern for color images. Must contain a placeholder for a string (face", "WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given face of a cube map. Args:", "as a list in row-major order. Raises: ValueError: Invalid clip planes. near <=", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is", "the box. sample: A list of floats in the range [0.0, 1.0] representing", "representing the relative sample position in the box. Returns: A list of floats,", "are the reversed digits divided by b^n. return min(reversed_digits / float(base_n), 1.0) def", "the view groups for the JSON output. Args: headbox_center: Center of the headbox", "than near.') left = -near right = near bottom = -near top =", "lower bounds of the headbox in world-space. headbox_max: List of three floats representing", "headbox_center)) # Replace the point closest to the headbox center by the headbox", "representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in the", "base |base|. Args: a: The integer number for which the radical inverse is", "0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya cameras", "0.0, 0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the radical", "radical inverse of |a| in base |base|. 
Args: a: The integer number for", "xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])", "minTime=start_time, maxTime=end_time) for face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Create", "the lower bounds of the headbox in world-space. headbox_max: List of three floats", "(right - left) b = (2.0 * near) / (top - bottom) c", "or implied. # See the License for the specific language governing permissions and", "ValueError('num_cameras must be positive') if num_cameras == 1: # Use the headbox center", "0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'top': return", "frames for the rig animation. Each of the six cameras will get one", "xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted(", "cameras and keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute", "* near) / (right - left) b = (2.0 * near) / (top", "this camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe(", "i + 3] = position[i] # Create camera object camera = { 'image_width':", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'top':", "for i in xrange(3): world_from_eye_matrix[4 * i + 3] = position[i] # Create", "projection matrix. The created matrix is an OpenGL-style projection matrix. Args: near: Eye-space", "PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for sample in samples: #", "bounds of the headbox in world-space. headbox_max: List of three floats representing the", "0.5, 0.5]) camera_positions = [] for sample in samples: # Normalize the samples", "as a list of 3 floats. 
\"\"\" result_hom = [0.0, 0.0, 0.0, 0.0]", "return map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the", "Eye-space Z position of the far clipping plane. Returns: The clip-from-eye matrix as", "0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the radical inverse", "pylint: disable=undefined-variable start_time = 0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time,", "relative sample position in the box. Returns: A list of floats, representing the", "Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two points. The points can have", "is not a valid cube map face name. \"\"\" # Disable the undefined-variable", "set for the samples. sample = [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i,", "= map(operator.mul, delta, delta) distance_sqr = 0.0 for element in delta_sqr: distance_sqr +=", "'projective_camera': camera, 'depth_image_file': { 'color': { 'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2':", "view from the reference camera. Args: headbox_min: The lower bounds of the headbox", "of 3 floats) representing the positions of the cameras. near_clip: Eye-space Z position", "number of dimensions. Args: point_a: A list of numbers representing the first point.", "0.5], num_view_groups=16, # Should be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z',", "matrix: A 4x4 matrix represented as a list of 16 floats. point: A", "the headbox center if a single camera position is requested. return [PointInBox(headbox_min, headbox_max,", "A dictionary representing the view groups. \"\"\" view_groups = [] for view_group_index, absolute_position", "headbox_center: Center of the headbox as a list of 3 floats. camera_positions: Positions", "box. 
sample: A list of floats in the range [0.0, 1.0] representing the", "that their bounding box is exactly equal to the headbox. The points are", "clipping planes. \"\"\" # Disable the undefined-variable lint error, because the Maya package", "sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for", "map(operator.add, box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between", "Name of the depth channel in the output file. Commonly used values are", "a power of two. image_size: Resolution of the output images in pixels. near_clip:", "b = (2.0 * near) / (top - bottom) c = (right +", "JSON manifest file. json_only: A boolean value. If true, the Maya camera generation", "near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view groups for", "use this file except in compliance with the License. # You may obtain", "a Maya camera node to look at a given cube map face. Args:", "pixels. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z", "Eye-space Z position of the near clipping planes. far_clip: Eye-space Z position of", "of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the", "i in xrange(num_cameras): # Use a 3D Hammersley point set for the samples.", "0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "the undefined-variable lint error, because the Maya package is not # defined in", "disable=bad-continuation if face_name is 'front': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,", "focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes for all", "Set translation component of world-from-eye matrix. 
for i in xrange(3): world_from_eye_matrix[4 * i", "enumerate(camera_positions): views = [] for face in ['front', 'back', 'left', 'right', 'bottom', 'top']:", "closest to the headbox center by the headbox center # itself. sorted_positions[0] =", "a valid cube map face name. \"\"\" # Disable the undefined-variable lint error,", "/ (right - left) b = (2.0 * near) / (top - bottom)", "for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY',", "Returns: A list of 3D points (each a list of 3 floats), representing", "-1.0, 0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the radical inverse of", "color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth': { 'path':", "package is not # defined in the environment where the linter runs. #", "= map(operator.add, box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance", "positive.') if far <= near: raise ValueError('far must be greater than near.') left", "Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a", "their bounding box is exactly equal to the headbox. The points are then", "a list of 3 floats. \"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for", "clipping planes. depth_type: A string representing the depth encoding. Valid values are: 'WINDOW_Z'", "= near bottom = -near top = near a = (2.0 * near)", "0.0 or far <= near. 
\"\"\" if near <= 0.0: raise ValueError('near must", "camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six Maya cameras and keyframe", "max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = []", "<= 0: raise ValueError('num_cameras must be positive') if num_cameras == 1: # Use", "far <= near: raise ValueError('far must be greater than near.') left = -near", "num_cameras: The number of cameras to generate. Should be a power of two.", "v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and", "[-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,", "headbox as a list of 3 floats. num_cameras: The number of cameras to", "[0.0, 0.0, 0.0] for i in xrange(num_cameras): # Use a 3D Hammersley point", "headbox center if a single camera position is requested. return [PointInBox(headbox_min, headbox_max, [0.5,", "and the given near and far clipping planes. This method also adjusts the", "as a list of 16 floats. point: A 3D point represented as a", "three floats representing the lower bounds of the headbox in world-space. headbox_max: List", "Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be", "sample position in the box. Returns: A list of floats, representing the absolute", "image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create view", "of 3 floats), representing the positions of the generated cameras. 
Raises: ValueError: num_cameras", "*= base a = next_a # Only when done are the reversed digits", "return math.sqrt(distance_sqr) def RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera node to look at", "A list of 3D points (each a list of 3 floats) representing the", "} views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return the view_groups as a", "e, f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes", "is closest to the headbox center is replaced by the headbox center itself", "pylint: disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0,", "raise ValueError('num_cameras must be positive') if num_cameras == 1: # Use the headbox", "box_max, sample): \"\"\"Computes a sample point inside a box with arbitrary number of", "Invalid clip planes. near <= 0.0 or far <= near. \"\"\" if near", "Camera posittions are computed as a 3D Hammersley point set. The points are", "computed as a 3D Hammersley point set. The points are transformed such that", "center. position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face)", "depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group = {'views': views} view_groups.append(view_group) #", "face. Args: camera_name: Name of the Maya camera's transform node. face_name: Name of", "headbox center # itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions", "positive') if num_cameras == 1: # Use the headbox center if a single", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0] #", "Compute the reversed digits, base b. while a > 0: next_a = a", "point inside a box with arbitrary number of dimensions. Args: box_min: A list", "point): \"\"\"Projects a 3D point using a 4x4 matrix. 
Args: matrix: A 4x4", "environment where the linter runs. # # pylint: disable=undefined-variable start_time = 0 end_time", "d = (top + bottom) / (top - bottom) e = (near +", "as a Python list. return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip, far_clip,", "matrix for the given face as a list in row-major order. Raises: ValueError:", "Returns: The euclidean distance as a float. \"\"\" delta = map(operator.sub, point_a, point_b)", "for dim in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position)", "Six cameras, one for each cube face, are generated. Each camera is configured", "of a cube map. Args: face_name: Name of the face. Must be one", "(2.0 * near * far) / (near - far) # pylint: disable=bad-whitespace return", "1.0] # pyformat: disable elif face_name is 'back': return [-1.0, 0.0, 0.0, 0.0,", "the box. \"\"\" delta = map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample)", "b^n. return min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min, box_max, sample): \"\"\"Computes a sample", "where the linter runs. # # pylint: disable=undefined-variable if face_name is 'front': pass", "order. Raises: ValueError: Invalid clip planes. near <= 0.0 or far <= near.", "= [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in", "with the License. # You may obtain a copy of the License at", "is 'front': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,", "0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'back': return [-1.0, 0.0,", "cube. for dim in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample)", "position of the near clipping plane. far: Eye-space Z position of the far", "1.0). \"\"\" reversed_digits = 0 base_n = 1 # Compute the reversed digits,", "the Maya timeline to exactly contain the frames for the rig animation. 
Each", "[] for sample in samples: # Normalize the samples so that their bounding", "one of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for", "ValueError: face is not a valid cube map face name. \"\"\" # Disable", "0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable", "matrix[4 * row + 3] w = result_hom[3] return map(operator.div, result_hom[0:3], [w, w,", "(depth_file_path_pattern % (face, view_group_index)) view = { 'projective_camera': camera, 'depth_image_file': { 'color': {", "world-from-eye matrix. for i in xrange(3): world_from_eye_matrix[4 * i + 3] = position[i]", "governing permissions and # limitations under the License. \"\"\"Generates a JSON manifest and", "the License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\"", "0.5, 0.5]) # Generate the JSON manifest and write it to the file.", "map. Args: face_name: Name of the face. Must be one of 'front', 'back',", "json_file_path='./manifest.json') \"\"\" import json import math import operator def ProjectPoint(matrix, point): \"\"\"Projects a", "law or agreed to in writing, software # distributed under the License is", "position relative to headbox center. 
position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip,", "sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions,", "+= element return math.sqrt(distance_sqr) def RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera node to", "= [] max_sample = [0.0, 0.0, 0.0] for i in xrange(num_cameras): # Use", "[a, 0.0, c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0, e, f, 0.0,", "depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2) with open(json_file_path, 'w') as", "the License. \"\"\"Generates a JSON manifest and a Maya camera rig for Seurat.", "xrange(3): result_hom[row] += matrix[4 * row + col] * point[col] # point.w =", "[ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,", "value. If true, the Maya camera generation step is bypassed. \"\"\" # Compute", "max_sample[dim] position = PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point:", "clipping planes. far_clip: Eye-space Z position of the far clipping planes. depth_type: A", "in compliance with the License. # You may obtain a copy of the", "if near <= 0.0: raise ValueError('near must be positive.') if far <= near:", "transformed such that their bounding box is exactly equal to the headbox. The", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "positions of the generated cameras. Raises: ValueError: num_cameras is not positive. \"\"\" if", "two. 
image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import", "% (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view = { 'projective_camera':", "= (near + far) / (near - far) f = (2.0 * near", "to generate. Should be a power of two. Returns: A list of 3D", "views} view_groups.append(view_group) # Return the view_groups as a Python list. return view_groups def", "keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox", "radical inverse is computed in this base (integer). Returns: The radical inverse as", "number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A", "usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power", "point represented as a list of 3 floats. Returns: The projected point, represented", "/ (right - left) d = (top + bottom) / (top - bottom)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "base_n *= base a = next_a # Only when done are the reversed", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Z position of the near clipping planes. far_clip: Eye-space Z position of the", "is 'back': return [-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "'right', 'bottom', 'top']: # Camera position relative to headbox center. position = map(operator.sub,", "<= 0.0 or far <= near. \"\"\" if near <= 0.0: raise ValueError('near", "CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya cameras for the rig. Six", "'right', 'bottom', 'top']: # Create a cube face camera and rotate it. camera_name", "floats. 
point: A 3D point represented as a list of 3 floats. Returns:", "center. Finally, the point that is closest to the headbox center is replaced", "far clipping planes. \"\"\" # Disable the undefined-variable lint error, because the Maya", "the rig. Six cameras, one for each cube face, are generated. Each camera", "Seurat. Args: headbox_min: List of three floats representing the lower bounds of the", "0.0] for i in xrange(num_cameras): # Use a 3D Hammersley point set for", "Maya camera's transform node. face_name: Name of the cube map face. Raises: ValueError:", "# Should be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr',", "be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json')", "floats) representing the positions of the cameras. near_clip: Eye-space Z position of the", "\"\"\"Creates a Maya camera rig and JSON manifest for Seurat. Args: headbox_min: List", "sample) position = map(operator.add, box_min, offset) return position def Distance(point_a, point_b): \"\"\"Computes the", "Args: box_min: A list of floats representing the lower bounds of the box.", "# Generate the JSON manifest and write it to the file. view_groups =", "sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya cameras for the", "- next_a * base reversed_digits = reversed_digits * base + digit base_n *=", "far clipping planes. depth_type: A string representing the depth encoding. Valid values are:", "else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90 degree FOV", "3 floats. headbox_max: The upper bounds of the headbox as a list of", "float. 
\"\"\" delta = map(operator.sub, point_a, point_b) delta_sqr = map(operator.mul, delta, delta) distance_sqr", "JSON manifest and write it to the file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size,", "is bypassed. \"\"\" # Compute the positions of the cameras. camera_positions = GenerateCameraPositions(headbox_min,", "3D point using a 4x4 matrix. Args: matrix: A 4x4 matrix represented as", "\"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for row in xrange(4): for col", "result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given face", "matrix. Args: matrix: A 4x4 matrix represented as a list of 16 floats.", "elif face_name is 'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0,", "import json import math import operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point", "# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the", "of the six cameras will get one keyframe per camera position. Args: camera_positions:", "def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two points. The points can", "'bottom', 'top']: # Camera position relative to headbox center. position = map(operator.sub, absolute_position,", "0.0: raise ValueError('near must be positive.') if far <= near: raise ValueError('far must", "(color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern % (face, view_group_index)) view = {", "is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY',", "result_hom = [0.0, 0.0, 0.0, 0.0] for row in xrange(4): for col in", "the file. 
view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern)", "left) d = (top + bottom) / (top - bottom) e = (near", "= a / base digit = a - next_a * base reversed_digits =", "disable elif face_name is 'bottom': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "row-major order. Raises: ValueError: face_name is not the name of a cube map", "color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the view groups for the JSON output. Args:", "-90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is 'top':", "camera positions in a headbox. Camera posittions are computed as a 3D Hammersley", "far clipping plane. Returns: The clip-from-eye matrix as a list in row-major order.", "reversed_digits = reversed_digits * base + digit base_n *= base a = next_a", "position[i] # Create camera object camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix':", "Returns: The clip-from-eye matrix as a list in row-major order. Raises: ValueError: Invalid", "center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate the JSON manifest", "cameras as a list of 3D points (each a list of 3 floats).", "is an OpenGL-style projection matrix. Args: near: Eye-space Z position of the near", "+= matrix[4 * row + 3] w = result_hom[3] return map(operator.div, result_hom[0:3], [w,", "clipping plane. Returns: The clip-from-eye matrix as a list in row-major order. Raises:", "viewport and the given near and far clipping planes. This method also adjusts", "Args: matrix: A 4x4 matrix represented as a list of 16 floats. point:", "delta, delta) distance_sqr = 0.0 for element in delta_sqr: distance_sqr += element return", "node to look at a given cube map face. Args: camera_name: Name of", "of dimensions. Args: point_a: A list of numbers representing the first point. 
point_b:", "face_name): \"\"\"Rotates a Maya camera node to look at a given cube map", "'bottom', 'top'. Returns: The world-from-eye matrix for the given face as a list", "CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates and returns the", "view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string =", "(2.0 * near) / (top - bottom) c = (right + left) /", "|base|. Args: a: The integer number for which the radical inverse is computed.", "box with arbitrary number of dimensions. Args: box_min: A list of floats representing", "['front', 'back', 'left', 'right', 'bottom', 'top']: # Create a cube face camera and", "= 0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for", "near clipping planes. far_clip: Eye-space Z position of the far clipping planes. depth_type:", "the headbox. The points are then sorted according to distance to the headbox", "# Use the headbox center if a single camera position is requested. return", "Args: camera_positions: A list of 3D points (each a list of 3 floats)", "(VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must contain", "The world-from-eye matrix for the given face as a list in row-major order.", "* point[col] # point.w = 1.0 implicitly result_hom[row] += matrix[4 * row +", "= max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions =", "GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox. 
Camera posittions are computed", "0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is", "camera's transform node. face_name: Name of the cube map face. Raises: ValueError: face", "point. point_b: A list of numbers representing the second point. Returns: The euclidean", "rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, #", "for col in xrange(3): result_hom[row] += matrix[4 * row + col] * point[col]", "License is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "'.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name + '.rotateX', -90) elif face_name is", "1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for face in ['front', 'back', 'left', 'right',", "position of the far clipping planes. depth_type: A string representing the depth encoding.", "'depth_image_file': { 'color': { 'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha':", "face. Raises: ValueError: face is not a valid cube map face name. \"\"\"", "ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a 4x4 matrix. Args: matrix: A", "3D point represented as a list of 3 floats. Returns: The projected point,", "face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Create a cube face", "absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation component", "(near - far) f = (2.0 * near * far) / (near -", "inverse as a float in the range [0.0, 1.0). \"\"\" reversed_digits = 0", "representing the absolute position of the sample in the box. 
\"\"\" delta =", "pyformat: disable elif face_name is 'bottom': return [ 1.0, 0.0, 0.0, 0.0, 0.0,", "0.0, c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0,", "this file except in compliance with the License. # You may obtain a", "digits, base b. while a > 0: next_a = a / base digit", "far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera rig and", "3 floats. num_cameras: The number of cameras to generate. Should be a power", "far_clip) # Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])", "0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name", "Replace the point closest to the headbox center by the headbox center #", "1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,", "'front': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "0.0, 0.0] for row in xrange(4): for col in xrange(3): result_hom[row] += matrix[4", "second point. Returns: The euclidean distance as a float. \"\"\" delta = map(operator.sub,", "(distance to eye). depth_channel_name: Name of the depth channel in the output file.", "headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation component of", "as a list of 3 floats. Returns: The projected point, represented as a", "for sample in samples: # Normalize the samples so that their bounding box", "RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera node to look at a given cube", "bottom) / (top - bottom) e = (near + far) / (near -", "element return math.sqrt(distance_sqr) def RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera node to look", "of 3 floats. num_cameras: The number of cameras to generate. Should be a", "- far) # pylint: disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0, b, d,", "and # limitations under the License. 
\"\"\"Generates a JSON manifest and a Maya", "not the name of a cube map face. \"\"\" # pylint: disable=bad-whitespace #", "floats in the range [0.0, 1.0] representing the relative sample position in the", "ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox. Camera", "pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90", "{ 'color': { 'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A'", "cube map face. \"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is", "name='seurat_' + face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation", "be positive') if num_cameras == 1: # Use the headbox center if a", "as a list of 3 floats. camera_positions: Positions of the cameras as a", "camera is configured with a square viewport and the given near and far", "= PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates", "an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ...", "the lower bounds of the box. box_max: A list of floats representing the", "0.0, 0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far):", "far_clip): \"\"\"Creates and animates the Maya cameras for the rig. Six cameras, one", "[0.5, 0.5, 0.5])] samples = [] max_sample = [0.0, 0.0, 0.0] for i", "The lower bounds of the headbox as a list of 3 floats. headbox_max:", "The number of cameras to generate. Should be a power of two. Returns:", "of the box. 
sample: A list of floats in the range [0.0, 1.0]", "values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color", "points are then sorted according to distance to the headbox center. Finally, the", "dictionary representing the view groups. \"\"\" view_groups = [] for view_group_index, absolute_position in", "translation component of world-from-eye matrix. for i in xrange(3): world_from_eye_matrix[4 * i +", "of 3D points (each a list of 3 floats) representing the positions of", "RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim])", "a float in the range [0.0, 1.0). \"\"\" reversed_digits = 0 base_n =", "as a list of 3 floats. headbox_max: The upper bounds of the headbox", "a - next_a * base reversed_digits = reversed_digits * base + digit base_n", "face_name is 'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min,", "# itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions,", "ValueError: face_name is not the name of a cube map face. \"\"\" #", "0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "WorldFromEyeMatrixFromFace(face) # Set translation component of world-from-eye matrix. for i in xrange(3): world_from_eye_matrix[4", "Create a cube face camera and rotate it. camera_name = maya.cmds.camera( name='seurat_' +", "position of the sample in the box. \"\"\" delta = map(operator.sub, box_max, box_min)", "headbox in world-space. headbox_max: List of three floats representing the upper bounds of", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "* near * far) / (near - far) # pylint: disable=bad-whitespace return [a,", "'left', 'right', 'bottom', 'top']: # Camera position relative to headbox center. 
position =", "# Create view object and add it to the view groups color_image_path =", "points can have an aribtrary number of dimensions. Args: point_a: A list of", "# Camera position relative to headbox center. position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix", "output. Args: headbox_center: Center of the headbox as a list of 3 floats.", "the environment where the linter runs. # # pylint: disable=undefined-variable start_time = 0", "CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) # Set translation component of world-from-eye matrix. for", "<= 0.0: raise ValueError('near must be positive.') if far <= near: raise ValueError('far", "a cube map face. \"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name", "near <= 0.0 or far <= near. \"\"\" if near <= 0.0: raise", "\"\"\"Computes a sample point inside a box with arbitrary number of dimensions. Args:", "0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is 'bottom': return", "a list of 3 floats) representing the positions of the cameras. near_clip: Eye-space", "'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain a placeholder for", "delta = map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample) position = map(operator.add,", "equal to the headbox. The points are then sorted according to distance to", "inverse is computed. base: The radical inverse is computed in this base (integer).", "the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate the", "of floats representing the lower bounds of the box. box_max: A list of", "required by applicable law or agreed to in writing, software # distributed under", "of the output images in pixels. near_clip: Eye-space Z position of the near", "Return the view_groups as a Python list. 
return view_groups def CreateRig(headbox_min, headbox_max, num_view_groups,", "channel in the output file. Commonly used values are 'R' (VRay) and 'A'", "it to the view groups color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path =", "base a = next_a # Only when done are the reversed digits divided", "b. while a > 0: next_a = a / base digit = a", "placeholder for a string (face name) and an integer (view group number). Example:", "Use a 3D Hammersley point set for the samples. sample = [ i", "0: next_a = a / base digit = a - next_a * base", "The points are then sorted according to distance to the headbox center. Finally,", "the six Maya cameras and keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip,", "-0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power of two.", "operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a 4x4 matrix. Args:", "if face_name is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180)", "0.5]) # Generate the JSON manifest and write it to the file. view_groups", "position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip) world_from_eye_matrix = WorldFromEyeMatrixFromFace(face) #", "Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "relative to headbox center. position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix = CubeFaceProjectionMatrix(near_clip, far_clip)", "in the range [0.0, 1.0] representing the relative sample position in the box.", "# Replace the point closest to the headbox center by the headbox center", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "point: Distance(point, headbox_center)) # Replace the point closest to the headbox center by", "if a single camera position is requested. 
return [PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])]", "distance between two points. The points can have an aribtrary number of dimensions.", "0.0] for row in xrange(4): for col in xrange(3): result_hom[row] += matrix[4 *", "depth_file_path_pattern: File name pattern for depth images. Must contain a placeholder for a", "box. box_max: A list of floats representing the upper bounds of the box.", "0.0, 1.0] # pyformat: disable elif face_name is 'right': return [ 0.0, 0.0,", "near_clip, far_clip) # Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5,", "elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom': maya.cmds.setAttr(camera_name", "elif face_name is 'left': return [ 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,", "position of the near clipping planes. far_clip: Eye-space Z position of the far", "-near top = near a = (2.0 * near) / (right - left)", "is computed. base: The radical inverse is computed in this base (integer). Returns:", "v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center,", "list of floats, representing the absolute position of the sample in the box.", "list of 3 floats). image_size: Size of the output images in pixels. near_clip:", "for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output", "the radical inverse is computed. base: The radical inverse is computed in this", "sample point inside a box with arbitrary number of dimensions. Args: box_min: A", "world-from-eye matrix for the given face as a list in row-major order. Raises:", "face name. \"\"\" # Disable the undefined-variable lint error, because the Maya package", "disable=bad-whitespace return [a, 0.0, c, 0.0, 0.0, b, d, 0.0, 0.0, 0.0, e,", "bounds of the box. 
box_max: A list of floats representing the upper bounds", "not positive. \"\"\" if num_cameras <= 0: raise ValueError('num_cameras must be positive') if", "lower bounds of the headbox as a list of 3 floats. headbox_max: The", "clip-from-eye matrix as a list in row-major order. Raises: ValueError: Invalid clip planes.", "base digit = a - next_a * base reversed_digits = reversed_digits * base", "'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth': { 'path': depth_image_path,", "the Maya camera generation step is bypassed. \"\"\" # Compute the positions of", "-1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable", "near * far) / (near - far) # pylint: disable=bad-whitespace return [a, 0.0,", "when done are the reversed digits divided by b^n. return min(reversed_digits / float(base_n),", "camera_name: Name of the Maya camera's transform node. face_name: Name of the cube", "# # pylint: disable=undefined-variable start_time = 0 end_time = len(camera_positions) - 1 maya.cmds.playbackOptions(", "are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color images.", "far: Eye-space Z position of the far clipping plane. Returns: The clip-from-eye matrix", "is distributed on an \"AS-IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "point that is closest to the headbox center is replaced by the headbox", "list of numbers representing the first point. point_b: A list of numbers representing", "v=position[1]) maya.cmds.setKeyframe( camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type,", "and returns the view groups for the JSON output. Args: headbox_center: Center of", "the given face as a list in row-major order. Raises: ValueError: face_name is", "the Maya camera's transform node. face_name: Name of the cube map face. Raises:", "as a float in the range [0.0, 1.0). 
\"\"\" reversed_digits = 0 base_n", "number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File", "be positive.') if far <= near: raise ValueError('far must be greater than near.')", "Z position of the near clipping plane. far: Eye-space Z position of the", "1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat:", "depth_type } # Create view object and add it to the view groups", "json_only: A boolean value. If true, the Maya camera generation step is bypassed.", "list in row-major order. Raises: ValueError: face_name is not the name of a", "'bottom', 'top']: # Create a cube face camera and rotate it. camera_name =", "# you may not use this file except in compliance with the License.", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0] #", "0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable else:", "0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif", "samples. sample = [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for", "of cameras to generate. Should be a power of two. Returns: A list", "output JSON manifest file. json_only: A boolean value. If true, the Maya camera", "Compute the positions of the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate", "a list of 3 floats. Returns: The projected point, represented as a list", "Returns: The radical inverse as a float in the range [0.0, 1.0). \"\"\"", "bounds of the headbox in world-space. num_view_groups: Number of view groups (camera positions)", "[0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the", "0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] #", "Returns: A list of floats, representing the absolute position of the sample in", "itself to include a view from the reference camera. 
Args: headbox_min: The lower", "'top': maya.cmds.setAttr(camera_name + '.rotateX', 90) else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras):", "bounding box is exactly equal to the headbox. The points are then sorted", "radical inverse as a float in the range [0.0, 1.0). \"\"\" reversed_digits =", "0.0, e, f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a, base):", "the given face of a cube map. Args: face_name: Name of the face.", "is not # defined in the environment where the linter runs. # #", "# Compute the headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) #", "= PointInBox(headbox_min, headbox_max, sample) camera_positions.append(position) sorted_positions = sorted( camera_positions, key=lambda point: Distance(point, headbox_center))", "col] * point[col] # point.w = 1.0 implicitly result_hom[row] += matrix[4 * row", "Maya camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5],", "[] for view_group_index, absolute_position in enumerate(camera_positions): views = [] for face in ['front',", "(each a list of 3 floats). image_size: Size of the output images in", "Should be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr',", "def RotateCamera(camera_name, face_name): \"\"\"Rotates a Maya camera node to look at a given", "inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth channel", "\"\"\" delta = map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample) position =", "output file. Commonly used values are 'R' (VRay) and 'A' (Arnold). 
color_file_path_pattern: File", "in xrange(3): world_from_eye_matrix[4 * i + 3] = position[i] # Create camera object", "Must contain a placeholder for a string (face name) and an integer (view", "'%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern", "License for the specific language governing permissions and # limitations under the License.", "Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary", "depth_channel_name } } } views.append(view) view_group = {'views': views} view_groups.append(view_group) # Return the", "the rig animation. Each of the six cameras will get one keyframe per", "representing the second point. Returns: The euclidean distance as a float. \"\"\" delta", "in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe(", "+ '.rotateY', 180) elif face_name is 'left': maya.cmds.setAttr(camera_name + '.rotateY', 90) elif face_name", "'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the given face", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "camera_name, at='translateZ', t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern,", "of 3 floats). image_size: Size of the output images in pixels. near_clip: Eye-space", "Resolution of the output images in pixels. near_clip: Eye-space Z position of the", "{ 'path': depth_image_path, 'channel_0': depth_channel_name } } } views.append(view) view_group = {'views': views}", "transform node. face_name: Name of the cube map face. 
Raises: ValueError: face is", "view object and add it to the view groups color_image_path = (color_file_path_pattern %", "90) else: raise ValueError('Invalid face_name') def GenerateCameraPositions(headbox_min, headbox_max, num_cameras): \"\"\"Generates camera positions in", "sample = [ i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim", "to the output JSON manifest file. json_only: A boolean value. If true, the", "step is bypassed. \"\"\" # Compute the positions of the cameras. camera_positions =", "because the Maya package is not # defined in the environment where the", "0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "(top + bottom) / (top - bottom) e = (near + far) /", "'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the given", "t=view_group_index, v=position[2]) def CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern): \"\"\"Creates", "near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera rig", "# Normalize the samples so that their bounding box is the unit cube.", "box_min) offset = map(operator.mul, delta, sample) position = map(operator.add, box_min, offset) return position", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "cube-face 90 degree FOV projection matrix. The created matrix is an OpenGL-style projection", "headbox_max, num_cameras): \"\"\"Generates camera positions in a headbox. 
Camera posittions are computed as", "[] for face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera position", "[w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given face of", "RadicalInverse(a, base): \"\"\"Computes the radical inverse of |a| in base |base|. Args: a:", "num_view_groups: Number of view groups (camera positions) to generate. Must be a power", "RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim] = max(max_sample[dim], sample[dim]) samples.append(sample) headbox_center", "Maya cameras and keyframe their positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip) #", "headbox center is replaced by the headbox center itself to include a view", "not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox center. headbox_center = PointInBox(headbox_min,", "color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2) with open(json_file_path, 'w') as json_file: json_file.write(json_string)", "the near clipping plane. far: Eye-space Z position of the far clipping plane.", "greater than near.') left = -near right = near bottom = -near top", "Z position of the far clipping planes. depth_type: A string representing the depth", "'left', 'right', 'bottom', 'top']: # Create a cube face camera and rotate it.", "face_name is 'front': pass elif face_name is 'back': maya.cmds.setAttr(camera_name + '.rotateY', 180) elif", "1.0] # pyformat: disable elif face_name is 'right': return [ 0.0, 0.0, -1.0,", "map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for the given", "of the headbox in world-space. headbox_max: List of three floats representing the upper", "- left) d = (top + bottom) / (top - bottom) e =", "to the headbox center. Finally, the point that is closest to the headbox", "face. 
Must be one of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The", "cube map face name. \"\"\" # Disable the undefined-variable lint error, because the", "and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr',", "unit cube. for dim in xrange(3): sample[dim] /= max_sample[dim] position = PointInBox(headbox_min, headbox_max,", "floats, representing the absolute position of the sample in the box. \"\"\" delta", "the euclidean distance between two points. The points can have an aribtrary number", "inside a box with arbitrary number of dimensions. Args: box_min: A list of", "{ 'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G', 'channel_2': 'B', 'channel_alpha': 'A' }, 'depth':", "(negated eye-space Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance", "e = (near + far) / (near - far) f = (2.0 *", "far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') \"\"\" import json import math import operator", "depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera rig and JSON manifest for Seurat.", "sample: A list of floats in the range [0.0, 1.0] representing the relative", "0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "used values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for", "in base |base|. Args: a: The integer number for which the radical inverse", "add it to the view groups color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path", "digits divided by b^n. 
return min(reversed_digits / float(base_n), 1.0) def PointInBox(box_min, box_max, sample):", "i / float(num_cameras), RadicalInverse(i, 2), RadicalInverse(i, 3) ] for dim in xrange(3): max_sample[dim]", "-0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power of two. image_size=1024,", "list of 3 floats) representing the positions of the cameras. near_clip: Eye-space Z", "name) and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr',", "positions of the cameras. camera_positions = GenerateCameraPositions(headbox_min, headbox_max, num_view_groups) # Generate the six", "'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must", "c = (right + left) / (right - left) d = (top +", "2.0 (the \"License\"); # you may not use this file except in compliance", "list of floats in the range [0.0, 1.0] representing the relative sample position", "# Only when done are the reversed digits divided by b^n. return min(reversed_digits", "power of two. Returns: A list of 3D points (each a list of", "matrix[4 * row + col] * point[col] # point.w = 1.0 implicitly result_hom[row]", "the frames for the rig animation. Each of the six cameras will get", "image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2) with", "planes. far_clip: Eye-space Z position of the far clipping planes. depth_type: A string", ", 'top.9999.exr'. Returns: A dictionary representing the view groups. \"\"\" view_groups = []", "the near clipping planes. far_clip: Eye-space Z position of the far clipping planes.", "map face. \"\"\" # pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front':", "result_hom[row] += matrix[4 * row + col] * point[col] # point.w = 1.0", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "as a list in row-major order. 
Raises: ValueError: face_name is not the name", "done are the reversed digits divided by b^n. return min(reversed_digits / float(base_n), 1.0)", "by the headbox center # itself. sorted_positions[0] = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5])", "is not positive. \"\"\" if num_cameras <= 0: raise ValueError('num_cameras must be positive')", "positions. if not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox center. headbox_center", "# # Unless required by applicable law or agreed to in writing, software", "result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w]) def WorldFromEyeMatrixFromFace(face_name): \"\"\"Creates world-from-eye matrix for", "# distributed under the License is distributed on an \"AS-IS\" BASIS, # WITHOUT", "group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path:", "group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns:", "f, 0.0, 0.0, -1.0, 0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the", "express or implied. # See the License for the specific language governing permissions", "= position[i] # Create camera object camera = { 'image_width': image_size, 'image_height': image_size,", "where the linter runs. # # pylint: disable=undefined-variable start_time = 0 end_time =", "Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache", "0.0, 0.0, b, d, 0.0, 0.0, 0.0, e, f, 0.0, 0.0, -1.0, 0.0]", "= map(operator.mul, delta, sample) position = map(operator.add, box_min, offset) return position def Distance(point_a,", "not a valid cube map face name. \"\"\" # Disable the undefined-variable lint", "sample in samples: # Normalize the samples so that their bounding box is", "either express or implied. # See the License for the specific language governing", "= 1 # Compute the reversed digits, base b. 
while a > 0:", "map(operator.sub, box_max, box_min) offset = map(operator.mul, delta, sample) position = map(operator.add, box_min, offset)", "{ 'projective_camera': camera, 'depth_image_file': { 'color': { 'path': color_image_path, 'channel_0': 'R', 'channel_1': 'G',", "Eye-space Z position of the far clipping planes. \"\"\" # Disable the undefined-variable", "range [0.0, 1.0] representing the relative sample position in the box. Returns: A", "reversed_digits = 0 base_n = 1 # Compute the reversed digits, base b.", "'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0,", "image_size: Size of the output images in pixels. near_clip: Eye-space Z position of", "per camera position. Args: camera_positions: A list of 3D points (each a list", "1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf); Arnold's encoding),", "enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0]) maya.cmds.setKeyframe( camera_name, at='translateY', t=view_group_index, v=position[1]) maya.cmds.setKeyframe( camera_name,", "floats representing the lower bounds of the headbox in world-space. headbox_max: List of", "'top'. 
Returns: The world-from-eye matrix for the given face as a list in", "'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type } # Create view object", "to the view groups color_image_path = (color_file_path_pattern % (face, view_group_index)) depth_image_path = (depth_file_path_pattern", "0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,", "PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) return sorted_positions def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and", "is 'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0,", "- 1 maya.cmds.playbackOptions( animationStartTime=start_time, animationEndTime=end_time, minTime=start_time, maxTime=end_time) for face in ['front', 'back', 'left',", "the License. # You may obtain a copy of the License at #", "error, because the Maya package is not # defined in the environment where", "base reversed_digits = reversed_digits * base + digit base_n *= base a =", "# pylint: disable=bad-continuation if face_name is 'front': return [ 1.0, 0.0, 0.0, 0.0,", "defined in the environment where the linter runs. # # pylint: disable=undefined-variable start_time", "Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye).", "headbox_min: The lower bounds of the headbox as a list of 3 floats.", "of the near clipping planes. far_clip: Eye-space Z position of the far clipping", "verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes for all positions on", "0.0, 0.0, 0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near,", "+ bottom) / (top - bottom) e = (near + far) / (near", "List of three floats representing the lower bounds of the headbox in world-space.", "json_only=False): \"\"\"Creates a Maya camera rig and JSON manifest for Seurat. 
Args: headbox_min:", "depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string = json.dumps({'view_groups': view_groups}, indent=2) with open(json_file_path, 'w') as json_file:", "0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable else: raise ValueError('Invalid face_name') def", "def CreateCameras(camera_positions, near_clip, far_clip): \"\"\"Creates and animates the Maya cameras for the rig.", "camera_positions = [] for sample in samples: # Normalize the samples so that", "according to distance to the headbox center. Finally, the point that is closest", "Z position of the far clipping planes. \"\"\" # Disable the undefined-variable lint", "90 degree FOV projection matrix. The created matrix is an OpenGL-style projection matrix.", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "keyframe per camera position. Args: camera_positions: A list of 3D points (each a", "of numbers representing the first point. point_b: A list of numbers representing the", "number of cameras to generate. Should be a power of two. Returns: A", "a sample point inside a box with arbitrary number of dimensions. Args: box_min:", "floats. \"\"\" result_hom = [0.0, 0.0, 0.0, 0.0] for row in xrange(4): for", "Raises: ValueError: face is not a valid cube map face name. \"\"\" #", "cameras to generate. Should be a power of two. Returns: A list of", "Camera position relative to headbox center. 
position = map(operator.sub, absolute_position, headbox_center) clip_from_eye_matrix =", "= PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for sample in samples:", "maxTime=end_time) for face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Create a", "+ face, focalLength=12.7, horizontalFilmAperture=1, verticalFilmAperture=1, nearClipPlane=near_clip, farClipPlane=far_clip)[0] RotateCamera(camera_name, face) # Set translation keyframes", "-1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "face_name: Name of the face. Must be one of 'front', 'back', 'left', 'right',", "The points are transformed such that their bounding box is exactly equal to", "1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,", "of the face. Must be one of 'front', 'back', 'left', 'right', 'bottom', 'top'.", "the reversed digits, base b. while a > 0: next_a = a /", "be one of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix", "camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16,", "row-major order. Raises: ValueError: Invalid clip planes. near <= 0.0 or far <=", "['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera position relative to headbox center.", "0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] # pyformat: disable elif face_name is", "1.0) def PointInBox(box_min, box_max, sample): \"\"\"Computes a sample point inside a box with", "in a headbox. Camera posittions are computed as a 3D Hammersley point set.", "num_view_groups=16, # Should be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A',", "cameras, one for each cube face, are generated. Each camera is configured with", "The radical inverse as a float in the range [0.0, 1.0). \"\"\" reversed_digits", "box_min: A list of floats representing the lower bounds of the box. box_max:", "a list of 3 floats). 
image_size: Size of the output images in pixels.", "+ '.rotateY', 90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name", "a Maya camera rig and JSON manifest for Seurat. Args: headbox_min: List of", "cube map face. Raises: ValueError: face is not a valid cube map face", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "License. \"\"\"Generates a JSON manifest and a Maya camera rig for Seurat. Example", "color images. Must contain a placeholder for a string (face name) and an", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "A list of floats in the range [0.0, 1.0] representing the relative sample", "== 1: # Use the headbox center if a single camera position is", "cameras will get one keyframe per camera position. Args: camera_positions: A list of", "headbox center. headbox_center = PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate the JSON", ", 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain a placeholder", "+= matrix[4 * row + col] * point[col] # point.w = 1.0 implicitly", "0.5])] samples = [] max_sample = [0.0, 0.0, 0.0] for i in xrange(num_cameras):", "camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix, 'depth_type': depth_type", "of 3 floats. camera_positions: Positions of the cameras as a list of 3D", "= (depth_file_path_pattern % (face, view_group_index)) view = { 'projective_camera': camera, 'depth_image_file': { 'color':", "headbox_max, [0.5, 0.5, 0.5]) camera_positions = [] for sample in samples: # Normalize", "range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the", "linter runs. # # pylint: disable=undefined-variable start_time = 0 end_time = len(camera_positions) -", "for all positions on this camera. 
for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name,", "file. view_groups = CreateViewGroups(headbox_center, camera_positions, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern) json_string", "range [0.0, 1.0). \"\"\" reversed_digits = 0 base_n = 1 # Compute the", "the headbox as a list of 3 floats. num_cameras: The number of cameras", "delta) distance_sqr = 0.0 for element in delta_sqr: distance_sqr += element return math.sqrt(distance_sqr)", "floats). image_size: Size of the output images in pixels. near_clip: Eye-space Z position", "and animates the Maya cameras for the rig. Six cameras, one for each", "'top.9999.exr'. json_file_path: Path to the output JSON manifest file. json_only: A boolean value.", "pylint: disable=bad-whitespace # pylint: disable=bad-continuation if face_name is 'front': return [ 1.0, 0.0,", "Only when done are the reversed digits divided by b^n. return min(reversed_digits /", "computed. base: The radical inverse is computed in this base (integer). Returns: The", "= (right + left) / (right - left) d = (top + bottom)", "% (face, view_group_index)) view = { 'projective_camera': camera, 'depth_image_file': { 'color': { 'path':", "inverse is computed in this base (integer). Returns: The radical inverse as a", "'depth_type': depth_type } # Create view object and add it to the view", "position def Distance(point_a, point_b): \"\"\"Computes the euclidean distance between two points. The points", "0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "are then sorted according to distance to the headbox center. Finally, the point", "also adjusts the Maya timeline to exactly contain the frames for the rig", "except in compliance with the License. # You may obtain a copy of", "map face. Args: camera_name: Name of the Maya camera's transform node. face_name: Name", "for file names 'front.0000.exr', 'front.0001.exr', ... 
, 'top.9999.exr'. depth_file_path_pattern: File name pattern for", "is not the name of a cube map face. \"\"\" # pylint: disable=bad-whitespace", "permissions and # limitations under the License. \"\"\"Generates a JSON manifest and a", "camera object camera = { 'image_width': image_size, 'image_height': image_size, 'clip_from_eye_matrix': clip_from_eye_matrix, 'world_from_eye_matrix': world_from_eye_matrix,", "near. \"\"\" if near <= 0.0: raise ValueError('near must be positive.') if far", "depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates a Maya camera rig and JSON manifest", "ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a cube-face 90 degree FOV projection matrix.", "of 16 floats. point: A 3D point represented as a list of 3", "may not use this file except in compliance with the License. # You", "on this camera. for view_group_index, position in enumerate(camera_positions): maya.cmds.setKeyframe( camera_name, at='translateX', t=view_group_index, v=position[0])", "'right': return [ 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0,", "0.0] # pyformat: disable def RadicalInverse(a, base): \"\"\"Computes the radical inverse of |a|", "for Seurat. Args: headbox_min: List of three floats representing the lower bounds of", "camera. Args: headbox_min: The lower bounds of the headbox as a list of", "+ col] * point[col] # point.w = 1.0 implicitly result_hom[row] += matrix[4 *", "0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]", "three floats representing the upper bounds of the headbox in world-space. num_view_groups: Number", "\"\"\" view_groups = [] for view_group_index, absolute_position in enumerate(camera_positions): views = [] for", "headbox as a list of 3 floats. 
headbox_max: The upper bounds of the", "CreateRig(headbox_min, headbox_max, num_view_groups, image_size, near_clip, far_clip, depth_type, depth_channel_name, color_file_path_pattern, depth_file_path_pattern, json_file_path, json_only=False): \"\"\"Creates", "= PointInBox(headbox_min, headbox_max, [0.5, 0.5, 0.5]) # Generate the JSON manifest and write", "the output JSON manifest file. json_only: A boolean value. If true, the Maya", "1.0] # pyformat: disable else: raise ValueError('Invalid face_name') def CubeFaceProjectionMatrix(near, far): \"\"\"Creates a", "90) elif face_name is 'right': maya.cmds.setAttr(camera_name + '.rotateY', -90) elif face_name is 'bottom':", "string representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in", "view_groups = [] for view_group_index, absolute_position in enumerate(camera_positions): views = [] for face", "<reponame>Asteur/vrhelper # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under", "import math import operator def ProjectPoint(matrix, point): \"\"\"Projects a 3D point using a", "of the far clipping plane. Returns: The clip-from-eye matrix as a list in", "near.') left = -near right = near bottom = -near top = near", "the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0,", "row + 3] w = result_hom[3] return map(operator.div, result_hom[0:3], [w, w, w]) def", "to distance to the headbox center. Finally, the point that is closest to", "point: A 3D point represented as a list of 3 floats. Returns: The", "a cube-face 90 degree FOV projection matrix. The created matrix is an OpenGL-style", "if not json_only: CreateCameras(camera_positions, near_clip, far_clip) # Compute the headbox center. 
headbox_center =", "[0.5, 0.5, 0.5]) camera_positions = [] for sample in samples: # Normalize the", "disable elif face_name is 'top': return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "map(operator.mul, delta, sample) position = map(operator.add, box_min, offset) return position def Distance(point_a, point_b):", "'%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary representing", "the reference camera. Args: headbox_min: The lower bounds of the headbox as a", "for face in ['front', 'back', 'left', 'right', 'bottom', 'top']: # Camera position relative", "the headbox center is replaced by the headbox center itself to include a", "return [ 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 0.0,", "an aribtrary number of dimensions. Args: point_a: A list of numbers representing the" ]
[ "<gh_stars>1-10 # Generated by Django 3.0.5 on 2021-04-18 06:02 from django.db import migrations", "[ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday', ),", "on 2021-04-18 06:02 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cse',", "Django 3.0.5 on 2021-04-18 06:02 from django.db import migrations class Migration(migrations.Migration): dependencies =", "06:02 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ]", "'0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday', ), migrations.DeleteModel( name='problemset',", "('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday', ), migrations.DeleteModel(", "class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp',", "] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday', ), migrations.DeleteModel( name='problemset', ),", "Generated by Django 3.0.5 on 2021-04-18 06:02 from django.db import migrations class Migration(migrations.Migration):", "Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ),", "2021-04-18 06:02 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'),", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations", "import migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations = [", "by Django 3.0.5 on 2021-04-18 06:02 from django.db import migrations class 
Migration(migrations.Migration): dependencies", "operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday', ), migrations.DeleteModel( name='problemset', ), ]", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations =", "3.0.5 on 2021-04-18 06:02 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "= [ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel( name='problemofday',", "# Generated by Django 3.0.5 on 2021-04-18 06:02 from django.db import migrations class", "migrations class Migration(migrations.Migration): dependencies = [ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel(", "dependencies = [ ('cse', '0008_problemset_name'), ] operations = [ migrations.DeleteModel( name='cp', ), migrations.DeleteModel(" ]
[]
[ "func1(c): global r global count if c in r: return else: r.append(c) c=c+1", "r global count if c in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0')", "return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1 func1(c) n=int(input()) count=0 func1(n) print(count)", "global r global count if c in r: return else: r.append(c) c=c+1 c=str(c)", "c in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1 func1(c) n=int(input())", "count=0 r=[] def func1(c): global r global count if c in r: return", "if c in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1 func1(c)", "global count if c in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c)", "in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1 func1(c) n=int(input()) count=0", "def func1(c): global r global count if c in r: return else: r.append(c)", "r=[] def func1(c): global r global count if c in r: return else:", "r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1 func1(c) n=int(input()) count=0 func1(n)", "count if c in r: return else: r.append(c) c=c+1 c=str(c) c=c.rstrip('0') c=int(c) count=count+1", "<gh_stars>0 count=0 r=[] def func1(c): global r global count if c in r:" ]
[ "currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print()", "earnings csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found", "file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for", "Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col, Testing with multiple months", "import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings", "in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings", "in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings", "monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col,", "print(\"Found earnings for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats()", "earnings for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for", "Monthly earnings sum col, Testing with multiple months # TODO FundingEarningsCalculator so umstrukturieren,", "print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings", "currency in currencies: print(\"Earnings for 
{}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print()", "print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) #", "TODO Monthly earnings sum col, Testing with multiple months # TODO FundingEarningsCalculator so", "sum col, Testing with multiple months # TODO FundingEarningsCalculator so umstrukturieren, dass parsen", "# TODO Monthly earnings sum col, Testing with multiple months # TODO FundingEarningsCalculator", "earnings sum col, Testing with multiple months # TODO FundingEarningsCalculator so umstrukturieren, dass", "csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print()", "csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings", "<gh_stars>1-10 import argparse from funding_earnings_stats import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path", "{}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\")", "= argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings csv file.\") args", "arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings csv file.\") args = arg_parser.parse_args()", "the bitfinex funding earnings csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies", "argparse.ArgumentParser() 
arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings csv file.\") args =", "print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO", "print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col, Testing with multiple months # TODO", "funding earnings csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies()", "csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies in file", "\".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency)) print()", "csv_parser.get_currencies() print(\"Found earnings for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats =", "for currency in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\")", "FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings csv", "for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly", "path to the bitfinex funding earnings csv file.\") args = arg_parser.parse_args() csv_parser =", "argparse from funding_earnings_stats import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the", "print() print(csv_parser.get_monthly_earnings()) # TODO 
Monthly earnings sum col, Testing with multiple months #", "{} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in", "bitfinex funding earnings csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies =", "= csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print()", "= csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col, Testing", "file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for", "print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings())", "for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency", "args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {}", "print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings()", "csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col, Testing with", "print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum col, Testing with multiple", "print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly 
Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly earnings sum", "to the bitfinex funding earnings csv file.\") args = arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file)", "import argparse from funding_earnings_stats import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to", "= csv_parser.get_currencies() print(\"Found earnings for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats", "args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency])", "{} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies: print(\"Earnings for {}\".format(currency))", "currencies in file {} \".format(len(currencies), args.funding_earnings_file)) currency_stats = csv_parser.get_currency_stats() for currency in currencies:", "= arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies", "print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print()", "arg_parser.parse_args() csv_parser = FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies in", "arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex funding earnings csv file.\")", "funding_earnings_stats import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the 
bitfinex funding", "from funding_earnings_stats import FundingEarningsCalculator arg_parser = argparse.ArgumentParser() arg_parser.add_argument(\"funding_earnings_file\", help=\"The path to the bitfinex", "= FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies in file {}", "FundingEarningsCalculator(args.funding_earnings_file) currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies in file {} \".format(len(currencies),", "print() print(\"---------------------\") print() monthly_earnings = csv_parser.get_monthly_earnings() print(\"Monthly Earnings\") print() print(csv_parser.get_monthly_earnings()) # TODO Monthly", "Testing with multiple months # TODO FundingEarningsCalculator so umstrukturieren, dass parsen ausgelagert ist", "currencies = csv_parser.get_currencies() print(\"Found earnings for {} currencies in file {} \".format(len(currencies), args.funding_earnings_file))", "col, Testing with multiple months # TODO FundingEarningsCalculator so umstrukturieren, dass parsen ausgelagert", "currencies: print(\"Earnings for {}\".format(currency)) print() print(currency_stats[currency]) print() print() print() print(\"---------------------\") print() monthly_earnings =", "help=\"The path to the bitfinex funding earnings csv file.\") args = arg_parser.parse_args() csv_parser" ]
[ "the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying the threshold.", "above the threshold precision = 1.0 if topk == 0 else np.mean([1 if", "test_labels): topk = np.argmax(res_score < threshold) # topk becomes the number of labels", "\"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH", "F1 score vary as threshold varies from 0 to 1. OPTIONS -m, --match", "scores test_labels: numpy array of target labels Returns: Stats object containing threshold topk:", "import pandas as pd from matplotlib import pyplot as plt from datetime import", "plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold)", "and F1 scores are macro (averaged across examples, not labels) \"\"\" precisions =", "directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL}", "plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of", "plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your", "<NAME> (<EMAIL>, <EMAIL>) ''' import click import os import numpy as np import", "on {DATASET}.\") thresholds = list(x / 10000 for x in range(1, 10)) +", "topk = np.argmax(res_score < threshold) # topk becomes the number of labels scoring", "follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of labels predicted. 
Higher", "x in res_label[:topk] else 0 for x in test_label]) f1 = 0 if", "x in test_label]) f1 = 0 if (precision + recall) == 0 else", "produces three plots from results in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall", "else: if verbose: PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\") now =", "= namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path", "= plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a", "Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1)", "of predicted labels res_scores: numpy array of predicted label scores test_labels: numpy array", "label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1)", "if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have", "= now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH,", "BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on", ") PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\",", "from results in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and are as", "{PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET},", "MATCH folder. plots_path (str): Path of plots folder. 
verbose (bool): Verbose output. \"\"\"", "= list(x / 10000 for x in range(1, 10)) + \\ list(x /", "{DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1", "{DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH", "# print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s)) if __name__ ==", "= [] ts = [] f1s = [] topks = [] for threshold", "allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True)", "F1 score across examples Note: precision, recall, and F1 scores are macro (averaged", "precision: average precision across examples recall: average recall across examples f1: average F1", "facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF", "not labels) \"\"\" precisions = [] recalls = [] topks = [] f1s", "= os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf()", "e.g., \"PeTaL\". -v, --verbose Enable verbosity. 
USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots", "allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on {DATASET}.\") thresholds", "varying threshold Numbers of labels predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path", "labels) \"\"\" precisions = [] recalls = [] topks = [] f1s =", "collections import namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision", "from 0 to 1. OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder. -p,", "level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH'", "10)) + \\ list(x / 100 for x in range(1, 10)) + \\", "0 for x in res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk] else", "plots a precision-recall curve by varying the threshold. As threshold decreases from 1", "stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make", "numpy array of predicted label scores test_labels: numpy array of target labels Returns:", "range(1, 10)) ps = [] rs = [] ts = [] f1s =", "plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a plots", "PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity. USAGE python3 precision_and_recall.py", "Recall, and F1 Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-',", "precision and recall and other statistics on graphs. 
Args: match_path (str): Path of", "now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH):", "import namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall", "MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset',", "PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\")", "saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE BY THRESHOLD", "plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot", "Last modified on 10 August 2021. DESCRIPTION precision_and_recall.py produces three plots from results", "range(1, 10)) + \\ list((9990 + x) / 10000 for x in range(1,", "(precision + recall) == 0 else (2 * precision * recall) / (precision", "default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall", "f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to assess the performance of the", "threshold precision: average precision across examples recall: average recall across examples f1: average", "# PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold')", "DESCRIPTION precision_and_recall.py produces three plots from results in MATCH/PeTaL. 
These three plots appear", "list((90 + x) / 100 for x in range(1, 10)) + \\ list((990", "logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores =", "scores are macro (averaged across examples, not labels) \"\"\" precisions = [] recalls", "/ 1000 for x in range(1, 10)) + \\ list((9990 + x) /", "10)) + \\ list(x / 20 for x in range(2, 19)) + \\", "Path of MATCH folder. plots_path (str): Path of plots folder. verbose (bool): Verbose", "+ x) / 10000 for x in range(1, 10)) ps = [] rs", "the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score vary", "np.mean([1 if x in res_label[:topk] else 0 for x in test_label]) f1 =", "for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision')", "plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v',", "import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m',", "dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity. 
USAGE python3 precision_and_recall.py -m ../src/MATCH -p", "CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs,", "have a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str", "F1 scores are macro (averaged across examples, not labels) \"\"\" precisions = []", "verbose: PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str", "f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold,", "plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w',", "goes up (because more labels get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots", "= f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if", "[] f1s = [] topks = [] for threshold in tqdm(thresholds): stats =", "plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts,", "for x in range(1, 10)) + \\ list(x / 1000 for x in", "match_path (str): Path of MATCH folder. plots_path (str): Path of plots folder. 
verbose", "labels res_scores: numpy array of predicted label scores test_labels: numpy array of target", "folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL',", "topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls),", "else np.mean([1 if x in test_label else 0 for x in res_label[:topk]]) recall", "= [] f1s = [] for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels):", "namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of", "labels Returns: Stats object containing threshold topk: average number of labels above threshold", "of the model. Precision-recall curve Precision, recall, and F1 score by varying threshold", "plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-',", "plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH =", "saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold:", "plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER", "precision, recall, and F1 scores are macro (averaged across examples, not labels) \"\"\"", "label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png')", "as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, 
res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float,", "dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path,", "-v, --verbose Enable verbosity. USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors:", "train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL}", "plots_path, dataset, verbose): \"\"\"Plots precision and recall and other statistics on graphs. Args:", "of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d',", "of labels above threshold precision: average precision across examples recall: average recall across", "- HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of labels predicted. Higher threshold", "recall and other statistics on graphs. Args: match_path (str): Path of MATCH folder.", "assess the performance of the model. 
Precision-recall curve Precision, recall, and F1 score", "os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\")", "precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots',", "'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots", "PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\")", "Note: precision, recall, and F1 scores are macro (averaged across examples, not labels)", "= [] for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score", "test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold < 1.0 res_labels: numpy", "plt from datetime import datetime import logging from collections import namedtuple from tqdm", "folder. -p, --plots PATH/TO/plots Path of plots folder. -d, --dataset PeTaL Name of", "on 10 August 2021. DESCRIPTION precision_and_recall.py produces three plots from results in MATCH/PeTaL.", "MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png,", "0, precision goes down but recall goes up (because more labels get past", "x) / 100 for x in range(1, 10)) + \\ list((990 + x)", "{MODEL} on {DATASET}.\") thresholds = list(x / 10000 for x in range(1, 10))", "--verbose Enable verbosity. 
USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME>", "100 for x in range(1, 10)) + \\ list(x / 20 for x", "type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.')", "/ 100 for x in range(1, 10)) + \\ list((990 + x) /", "0 else (2 * precision * recall) / (precision + recall) topks.append(topk) precisions.append(precision)", "10 August 2021. DESCRIPTION precision_and_recall.py produces three plots from results in MATCH/PeTaL. These", "-d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity. USAGE", "topk becomes the number of labels scoring above the threshold precision = 1.0", "precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s))", "plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps,", "######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on {DATASET}')", "Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)", "in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots", "are macro (averaged across examples, not labels) \"\"\" precisions = [] recalls =", "to 1. OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder. 
-p, --plots PATH/TO/plots", "\"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold < 1.0 res_labels: numpy array", "of target labels Returns: Stats object containing threshold topk: average number of labels", "else 0 for x in test_label]) f1 = 0 if (precision + recall)", "os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You", "Precision-recall curve Precision, recall, and F1 score by varying threshold Numbers of labels", "precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s)) if __name__ == '__main__': main()", "varying the threshold. As threshold decreases from 1 to 0, precision goes down", "np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing", "performance of the model. Precision-recall curve Precision, recall, and F1 score by varying", "from matplotlib import pyplot as plt from datetime import datetime import logging from", "plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision,", "os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ########################################", "-p, --plots PATH/TO/plots Path of plots folder. 
-d, --dataset PeTaL Name of dataset,", "Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall')", "recall across examples f1: average F1 score across examples Note: precision, recall, and", "data. Last modified on 10 August 2021. DESCRIPTION precision_and_recall.py produces three plots from", "plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your", "varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH =", "by varying threshold Numbers of labels predicted by varying threshold ''' ALL_PLOTS_PATH =", "res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots", "topk == 0 else np.mean([1 if x in test_label else 0 for x", "1 to 0, precision goes down but recall goes up (because more labels", "[] for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall)", "@click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path", "for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1)", "required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall and", "'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False,", "+ x) / 100 for x in range(1, 10)) + \\ list((990 +", "+ recall) 
topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks),", "0 else np.mean([1 if x in test_label else 0 for x in res_label[:topk]])", "<EMAIL>) ''' import click import os import numpy as np import pandas as", "''' import click import os import numpy as np import pandas as pd", "PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a plots", "\\ list(x / 1000 for x in range(1, 10)) + \\ list(x /", "\"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots", "res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk] else 0 for x in", "labels predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH)", "np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold", "print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s)) if __name__ == '__main__':", "--dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity. USAGE python3", "pd from matplotlib import pyplot as plt from datetime import datetime import logging", "Parameters: threshold: float, 0.0 < threshold < 1.0 res_labels: numpy array of predicted", "HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying the threshold. 
As threshold decreases", "os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ########################################", "f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else:", "HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score vary as threshold varies", "statistics on graphs. Args: match_path (str): Path of MATCH folder. plots_path (str): Path", "output. \"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset", "f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose:", "format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels", "@click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name", "allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics", "saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD ########################################", "already have a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid()", "array of predicted label scores test_labels: numpy array of target labels Returns: Stats", "f1s = [] topks = [] for threshold in 
tqdm(thresholds): stats = compute_stats(threshold,", "array of predicted labels res_scores: numpy array of predicted label scores test_labels: numpy", "by varying threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if", "''' precision_and_recall.py Run MATCH with PeTaL data. Last modified on 10 August 2021.", "folder. plots_path (str): Path of plots folder. verbose (bool): Verbose output. \"\"\" logging.basicConfig(", "recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p',", "of MATCH folder. -p, --plots PATH/TO/plots Path of plots folder. -d, --dataset PeTaL", "x in range(1, 10)) + \\ list(x / 1000 for x in range(1,", "x in range(1, 10)) + \\ list(x / 100 for x in range(1,", "list(x / 10000 for x in range(1, 10)) + \\ list(x / 1000", "(str): Path of plots folder. verbose (bool): Verbose output. \"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s]", "up (because more labels get past the threshold). 
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how", "np.argmax(res_score < threshold) # topk becomes the number of labels scoring above the", "decreases from 1 to 0, precision goes down but recall goes up (because", "test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to assess", "######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying", "Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels')", "linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png')", "plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels,", "{PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number", "# topk becomes the number of labels scoring above the threshold precision =", "plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for", "pyplot as plt from datetime import datetime import logging from collections import namedtuple", "plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall')", "varies threshold and plots number of labels predicted. 
Higher threshold means fewer labels", "BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL}", "recall = np.mean([1 if x in res_label[:topk] else 0 for x in test_label])", "MATCH with PeTaL data. Last modified on 10 August 2021. DESCRIPTION precision_and_recall.py produces", "@click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose):", "Args: match_path (str): Path of MATCH folder. plots_path (str): Path of plots folder.", "zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold) # topk becomes the number", "np.mean([1 if x in test_label else 0 for x in res_label[:topk]]) recall =", "--plots PATH/TO/plots Path of plots folder. -d, --dataset PeTaL Name of dataset, e.g.,", "= [] for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision)", "1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot", "{PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid()", "test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold) # topk becomes", "list(x / 100 for x in range(1, 10)) + \\ list(x / 20", "LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold for", "precision_and_recall.py Run MATCH with PeTaL data. Last modified on 10 August 2021. 
DESCRIPTION", "test_label]) f1 = 0 if (precision + recall) == 0 else (2 *", "-p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import os import", "# PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and", "res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold) #", "get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1", "examples recall: average recall across examples f1: average F1 score across examples Note:", "logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL =", "* recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision,", "Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True),", "range(1, 10)) + \\ list(x / 1000 for x in range(1, 10)) +", "test_label else 0 for x in res_label[:topk]]) recall = np.mean([1 if x in", "the number of labels scoring above the threshold precision = 1.0 if topk", "x in range(1, 10)) ps = [] rs = [] ts = []", "as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of labels predicted.", "np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\",", "help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall and other", "Name of dataset, e.g., \"PeTaL\". 
-v, --verbose Enable verbosity. USAGE python3 precision_and_recall.py -m", "across examples, not labels) \"\"\" precisions = [] recalls = [] topks =", "ps = [] rs = [] ts = [] f1s = [] topks", "Precision, recall, and F1 score by varying threshold Numbers of labels predicted by", "of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def", "AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by", "help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".')", "res_score, test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold) # topk", "more labels get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall,", "@click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True),", "predicted. Higher threshold means fewer labels get past the threshold. 
- HHMMSS_prc_MATCH_PeTaL.png, which", "\"\"\" precisions = [] recalls = [] topks = [] f1s = []", "to 0, precision goes down but recall goes up (because more labels get", "= [] recalls = [] topks = [] f1s = [] for res_label,", "at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\")", "import pyplot as plt from datetime import datetime import logging from collections import", "[] f1s = [] for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk", "1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved", "* precision * recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) #", "verbose (bool): Verbose output. \"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\")", "object containing threshold topk: average number of labels above threshold precision: average precision", "label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend()", "not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose:", "linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1)", "test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by", "and F1 Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision')", "= compute_stats(threshold, res_labels, res_scores, 
test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the", "= np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying", "os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already", "MATCH folder. -p, --plots PATH/TO/plots Path of plots folder. -d, --dataset PeTaL Name", "is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and", "plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot", "= dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True)", "plots appear in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold", "on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1)", "which varies threshold and plots number of labels predicted. Higher threshold means fewer", "topks = [] for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels)", "other statistics on graphs. Args: match_path (str): Path of MATCH folder. 
plots_path (str):", "in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk)", "in range(1, 10)) + \\ list(x / 1000 for x in range(1, 10))", "x) / 10000 for x in range(1, 10)) ps = [] rs =", "examples, not labels) \"\"\" precisions = [] recalls = [] topks = []", "matplotlib import pyplot as plt from datetime import datetime import logging from collections", "linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH,", "vary as threshold varies from 0 to 1. OPTIONS -m, --match PATH/TO/MATCH Path", "res_scores, test_labels): topk = np.argmax(res_score < threshold) # topk becomes the number of", "graphs. Args: match_path (str): Path of MATCH folder. plots_path (str): Path of plots", "plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)", "datetime import logging from collections import namedtuple from tqdm import tqdm Stats =", "if (precision + recall) == 0 else (2 * precision * recall) /", "OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder. -p, --plots PATH/TO/plots Path of", "float, 0.0 < threshold < 1.0 res_labels: numpy array of predicted labels res_scores:", "plots to assess the performance of the model. 
Precision-recall curve Precision, recall, and", "res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to", "plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is", "rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0,", "topks = [] f1s = [] for res_label, res_score, test_label in zip(res_labels, res_scores,", "help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset',", "how precision, recall, and F1 score vary as threshold varies from 0 to", "Enable verbosity. USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>,", "down but recall goes up (because more labels get past the threshold). -", "1.0 res_labels: numpy array of predicted labels res_scores: numpy array of predicted label", "number of labels scoring above the threshold precision = 1.0 if topk ==", "of labels predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH):", "res_labels: numpy array of predicted labels res_scores: numpy array of predicted label scores", "threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying the threshold. As", "\\ list(x / 20 for x in range(2, 19)) + \\ list((90 +", "plots_path (str): Path of plots folder. verbose (bool): Verbose output. 
\"\"\" logging.basicConfig( level=logging.INFO,", "scoring above the threshold precision = 1.0 if topk == 0 else np.mean([1", "(averaged across examples, not labels) \"\"\" precisions = [] recalls = [] topks", "of labels scoring above the threshold precision = 1.0 if topk == 0", "[] topks = [] for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores,", "def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold", "of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w',", "x in res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk] else 0 for", "f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path',", "and recall and other statistics on graphs. Args: match_path (str): Path of MATCH", "= 1.0 if topk == 0 else np.mean([1 if x in test_label else", "(str): Path of MATCH folder. plots_path (str): Path of plots folder. verbose (bool):", "of MATCH folder. plots_path (str): Path of plots folder. 
verbose (bool): Verbose output.", "verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on {DATASET}.\") thresholds = list(x", "< threshold) # topk becomes the number of labels scoring above the threshold", "\"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL", "precision = 1.0 if topk == 0 else np.mean([1 if x in test_label", "/ (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return", "of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose',", "Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs,", "/ 10000 for x in range(1, 10)) ps = [] rs = []", "in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold) # topk becomes the", "== 0 else np.mean([1 if x in test_label else 0 for x in", "rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH,", "1000 for x in range(1, 10)) + \\ list((9990 + x) / 10000", "else: if verbose: PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\") ######################################## #", "1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is", "10000 for x in range(1, 10)) + \\ list(x / 1000 for x", "ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold')", "e.g., \"PeTaL\".') @click.option('--verbose', '-v', 
type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path,", "if x in test_label else 0 for x in res_label[:topk]]) recall = np.mean([1", "facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL,", "for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts,", "August 2021. DESCRIPTION precision_and_recall.py produces three plots from results in MATCH/PeTaL. These three", "res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels", "test_labels: numpy array of target labels Returns: Stats object containing threshold topk: average", "verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a", "labels scoring above the threshold precision = 1.0 if topk == 0 else", "'-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots", "for x in range(1, 10)) + \\ list(x / 100 for x in", "containing threshold topk: average number of labels above threshold precision: average precision across", "from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command()", "Numbers of labels predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path if not", "plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score')", "label scores test_labels: numpy array of target labels Returns: Stats object containing 
threshold", "list(x / 20 for x in range(2, 19)) + \\ list((90 + x)", "type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision", "10)) ps = [] rs = [] ts = [] f1s = []", "{PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\") ########################################", "import click import os import numpy as np import pandas as pd from", "precision across examples recall: average recall across examples f1: average F1 score across", "topks.append(stats.topk) ''' Make the following plots to assess the performance of the model.", "19)) + \\ list((90 + x) / 100 for x in range(1, 10))", "and F1 score vary as threshold varies from 0 to 1. OPTIONS -m,", "get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying", "+ \\ list(x / 100 for x in range(1, 10)) + \\ list(x", "PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps,", "as pd from matplotlib import pyplot as plt from datetime import datetime import", "above threshold precision: average precision across examples recall: average recall across examples f1:", "######################################## # PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall,", "Path of plots folder. verbose (bool): Verbose output. \"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\"", "verbosity. 
USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>)", "is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD", "+ \\ list(x / 1000 for x in range(1, 10)) + \\ list(x", "numpy as np import pandas as pd from matplotlib import pyplot as plt", "labels get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by", "\"\"\"Plots precision and recall and other statistics on graphs. Args: match_path (str): Path", "of plots folder. -d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose", "[] recalls = [] topks = [] f1s = [] for res_label, res_score,", "threshold and plots number of labels predicted. Higher threshold means fewer labels get", "PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold for {MODEL}", "varies from 0 to 1. OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder.", "means fewer labels get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall", "in range(1, 10)) + \\ list((9990 + x) / 10000 for x in", "-m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import", "Stats object containing threshold topk: average number of labels above threshold precision: average", "threshold). 
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score vary as", "0 if (precision + recall) == 0 else (2 * precision * recall)", "Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0,", "if verbose: PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL", "f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s)) if __name__", "threshold: float, 0.0 < threshold < 1.0 res_labels: numpy array of predicted labels", "PATH/TO/MATCH Path of MATCH folder. -p, --plots PATH/TO/plots Path of plots folder. -d,", "../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import os", "import os import numpy as np import pandas as pd from matplotlib import", "recall: average recall across examples f1: average F1 score across examples Note: precision,", "if x in res_label[:topk] else 0 for x in test_label]) f1 = 0", "--match PATH/TO/MATCH Path of MATCH folder. -p, --plots PATH/TO/plots Path of plots folder.", "if topk == 0 else np.mean([1 if x in test_label else 0 for", "[] for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score <", "of Labels Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number", "1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your", "Higher threshold means fewer labels get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots", "PATH/TO/plots Path of plots folder. 
-d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\".", "threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH,", "if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\") else: if", "{MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels')", "in res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk] else 0 for x", "/ 100 for x in range(1, 10)) + \\ list(x / 20 for", "Verbose output. \"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET =", "facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores,", "score vary as threshold varies from 0 to 1. OPTIONS -m, --match PATH/TO/MATCH", "USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) '''", "PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on {DATASET}.\") thresholds = list(x /", "f1: average F1 score across examples Note: precision, recall, and F1 scores are", "tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) '''", "for x in range(1, 10)) + \\ list(x / 20 for x in", "time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if", "import logging from collections import namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\",", "datetime import datetime import logging from collections import namedtuple from tqdm import tqdm", "folder.') 
@click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL,", "compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold < 1.0 res_labels: numpy array of", "0 for x in test_label]) f1 = 0 if (precision + recall) ==", "HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of labels predicted. Higher threshold means", "for x in res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk] else 0", "of labels predicted. Higher threshold means fewer labels get past the threshold. -", "threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You", "list(x / 1000 for x in range(1, 10)) + \\ list(x / 100", "following plots to assess the performance of the model. Precision-recall curve Precision, recall,", "directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a plots directory at", "by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold')", "x in range(1, 10)) + \\ list((9990 + x) / 10000 for x", "(2 * precision * recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1)", "ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have", "and F1 score by varying threshold Numbers of labels predicted by varying threshold", "fewer labels get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve", "threshold for {MODEL} on {DATASET}.\") thresholds = list(x / 10000 for x in", "(because more labels get past the threshold). 
- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision,", "DATASET = dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\",", "''' Make the following plots to assess the performance of the model. Precision-recall", "not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a plots directory at", "\\ list((90 + x) / 100 for x in range(1, 10)) + \\", "a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve", "[] topks = [] f1s = [] for res_label, res_score, test_label in zip(res_labels,", "target labels Returns: Stats object containing threshold topk: average number of labels above", "res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold < 1.0 res_labels:", "PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ########################################", "three plots from results in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and", "as plt from datetime import datetime import logging from collections import namedtuple from", "''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already", "plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved", "range(1, 10)) + \\ list((990 + x) / 1000 for x in range(1,", "past the threshold. 
- HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying the", "average number of labels above threshold precision: average precision across examples recall: average", "score by varying threshold Numbers of labels predicted by varying threshold ''' ALL_PLOTS_PATH", "for x in range(1, 10)) + \\ list((990 + x) / 1000 for", "for {MODEL} on {DATASET}.\") thresholds = list(x / 10000 for x in range(1,", "as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid()", "@click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True,", "plots from results in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and are", "results in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and are as follows:", "the model. Precision-recall curve Precision, recall, and F1 score by varying threshold Numbers", "import datetime import logging from collections import namedtuple from tqdm import tqdm Stats", "precision * recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk],", "NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by", "As threshold decreases from 1 to 0, precision goes down but recall goes", "plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved", "threshold varies from 0 to 1. 
OPTIONS -m, --match PATH/TO/MATCH Path of MATCH", "\"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.') def main(match_path, plots_path, dataset,", "by varying threshold for {MODEL} on {DATASET}.\") thresholds = list(x / 10000 for", "precision_and_recall.py produces three plots from results in MATCH/PeTaL. These three plots appear in", "recalls = [] topks = [] f1s = [] for res_label, res_score, test_label", "10)) + \\ list((990 + x) / 1000 for x in range(1, 10))", "type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset, e.g.,", "plots number of labels predicted. Higher threshold means fewer labels get past the", "os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def", "directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment", "verbose: PRlogger.info(f\"You already have a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE", "< threshold < 1.0 res_labels: numpy array of predicted labels res_scores: numpy array", "which plots a precision-recall curve by varying the threshold. 
As threshold decreases from", "now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\")", "for x in range(2, 19)) + \\ list((90 + x) / 100 for", "transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS", "%(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels =", "x in test_label else 0 for x in res_label[:topk]]) recall = np.mean([1 if", "100 for x in range(1, 10)) + \\ list((990 + x) / 1000", "in range(1, 10)) + \\ list((990 + x) / 1000 for x in", "+ \\ list((9990 + x) / 10000 for x in range(1, 10)) ps", "PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str =", "= logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores", "THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold for {MODEL} on", "labels get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and", "[] rs = [] ts = [] f1s = [] topks = []", "10)) + \\ list(x / 1000 for x in range(1, 10)) + \\", "if verbose: PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now()", "Labels Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of", "0 to 1. OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder. -p, --plots", "\\ list((990 + x) / 1000 for x in range(1, 10)) + \\", "are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of labels", "number of labels predicted. 
Higher threshold means fewer labels get past the threshold.", "= np.mean([1 if x in res_label[:topk] else 0 for x in test_label]) f1", "plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as", "= 0 if (precision + recall) == 0 else (2 * precision *", "res_scores: numpy array of predicted label scores test_labels: numpy array of target labels", "'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of dataset,", "dataset, verbose): \"\"\"Plots precision and recall and other statistics on graphs. Args: match_path", "output.') def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall and other statistics", "ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to assess the", "numpy array of predicted labels res_scores: numpy array of predicted label scores test_labels:", "'-d', 'dataset', default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False,", "PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1", "curve Precision, recall, and F1 score by varying threshold Numbers of labels predicted", "thresholds = list(x / 10000 for x in range(1, 10)) + \\ list(x", "three plots appear in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies", "as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ########################################", "PeTaL data. Last modified on 10 August 2021. 
DESCRIPTION precision_and_recall.py produces three plots", "../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import os import numpy", "at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment =", "and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number of", "topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH,", "{PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0", "+ recall) == 0 else (2 * precision * recall) / (precision +", "transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels):", "varying threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose:", "examples f1: average F1 score across examples Note: precision, recall, and F1 scores", "SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold for", "Path of plots folder. -d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v,", "= os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at", "/ 10000 for x in range(1, 10)) + \\ list(x / 1000 for", "the performance of the model. Precision-recall curve Precision, recall, and F1 score by", "appear in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and", "labels predicted. Higher threshold means fewer labels get past the threshold. 
- HHMMSS_prc_MATCH_PeTaL.png,", "= np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on", "predicted label scores test_labels: numpy array of target labels Returns: Stats object containing", "plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False)", "/ 20 for x in range(2, 19)) + \\ list((90 + x) /", "compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 < threshold <", "at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE ######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on", "Path of MATCH folder. -p, --plots PATH/TO/plots Path of plots folder. -d, --dataset", "threshold Numbers of labels predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path if", "f1s = [] for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk =", "(precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold,", "across examples recall: average recall across examples f1: average F1 score across examples", "+ x) / 1000 for x in range(1, 10)) + \\ list((9990 +", "plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE", "precisions = [] recalls = [] topks = [] f1s = [] for", "plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as", "res_label[:topk] else 0 for x in test_label]) f1 = 0 if (precision +", "PLOT_PATH = 
os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\")", "across examples Note: precision, recall, and F1 scores are macro (averaged across examples,", "recall, and F1 score by varying threshold Numbers of labels predicted by varying", "plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\")", "{MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s,", "plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED BY", "def main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall and other statistics on", "x in range(2, 19)) + \\ list((90 + x) / 100 for x", "date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH =", "label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH,", "for x in range(1, 10)) + \\ list((9990 + x) / 10000 for", "macro (averaged across examples, not labels) \"\"\" precisions = [] recalls = []", "compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following", "x in range(1, 10)) + \\ list(x / 20 for x in range(2,", "1000 for x in range(1, 10)) + \\ list(x / 100 for x", "in range(1, 10)) + \\ list(x / 20 for x in range(2, 19))", "\\ list(x / 100 for x in range(1, 10)) + \\ list(x /", "have a plots directory at {PLOTS_PATH}\") ######################################## # PRECISION-RECALL CURVE 
######################################## plt.grid() plt.title(f'Precision-Recall", "number of labels above threshold precision: average precision across examples recall: average recall", "2021. DESCRIPTION precision_and_recall.py produces three plots from results in MATCH/PeTaL. These three plots", "else 0 for x in res_label[:topk]]) recall = np.mean([1 if x in res_label[:topk]", "x) / 1000 for x in range(1, 10)) + \\ list((9990 + x)", "precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click", "linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w',", "- HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score vary as threshold", "== 0 else (2 * precision * recall) / (precision + recall) topks.append(topk)", "allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold for", "plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION,", "by varying the threshold. As threshold decreases from 1 to 0, precision goes", "+ \\ list((90 + x) / 100 for x in range(1, 10)) +", "of dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity. 
USAGE python3 precision_and_recall.py -m ../src/MATCH", "for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold)", "ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to assess the performance of", "x in range(1, 10)) + \\ list((990 + x) / 1000 for x", "threshold topk: average number of labels above threshold precision: average precision across examples", "{MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0,", "= np.argmax(res_score < threshold) # topk becomes the number of labels scoring above", "and plots number of labels predicted. Higher threshold means fewer labels get past", "plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0,", "precision, recall, and F1 score vary as threshold varies from 0 to 1.", "is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND F1 SCORE BY", "= os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf()", "in range(2, 19)) + \\ list((90 + x) / 100 for x in", "-m, --match PATH/TO/MATCH Path of MATCH folder. -p, --plots PATH/TO/plots Path of plots", "folder. -d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose Enable verbosity.", "threshold precision = 1.0 if topk == 0 else np.mean([1 if x in", "on graphs. Args: match_path (str): Path of MATCH folder. plots_path (str): Path of", "Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import os import numpy as np", "plots folder. verbose (bool): Verbose output. 
\"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger", "of predicted label scores test_labels: numpy array of target labels Returns: Stats object", "in MATCH/PeTaL. These three plots appear in plots/YYYYMMDD_precision_recall and are as follows: -", "list((990 + x) / 1000 for x in range(1, 10)) + \\ list((9990", "F1 Score by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts,", "namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\")", "os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory at {PLOTS_PATH}\")", "score across examples Note: precision, recall, and F1 scores are macro (averaged across", "plots folder. -d, --dataset PeTaL Name of dataset, e.g., \"PeTaL\". -v, --verbose Enable", "dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels", "+ \\ list(x / 20 for x in range(2, 19)) + \\ list((90", "= datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\"", "model. Precision-recall curve Precision, recall, and F1 score by varying threshold Numbers of", "plt.grid() plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts, topks,", "average recall across examples f1: average F1 score across examples Note: precision, recall,", "1. OPTIONS -m, --match PATH/TO/MATCH Path of MATCH folder. -p, --plots PATH/TO/plots Path", "######################################## plt.grid() plt.title(f'Precision-Recall Curve for {MODEL} on {DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-')", "(bool): Verbose output. 
\"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger = logging.getLogger(\"P&R\") DATASET", "range(1, 10)) + \\ list(x / 100 for x in range(1, 10)) +", "= [] rs = [] ts = [] f1s = [] topks =", "the threshold. As threshold decreases from 1 to 0, precision goes down but", "- HHMMSS_prc_MATCH_PeTaL.png, which plots a precision-recall curve by varying the threshold. As threshold", "f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH =", "= [] topks = [] f1s = [] for res_label, res_score, test_label in", "main(match_path, plots_path, dataset, verbose): \"\"\"Plots precision and recall and other statistics on graphs.", "res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if", "in test_label]) f1 = 0 if (precision + recall) == 0 else (2", "= now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not", "######################################## # NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels", "examples Note: precision, recall, and F1 scores are macro (averaged across examples, not", "on {DATASET}') plt.plot(ts, topks, linestyle='-', label='Number of Labels') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Labels') plt.legend()", "folder. verbose (bool): Verbose output. 
\"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" ) PRlogger =", "precision goes down but recall goes up (because more labels get past the", "< 1.0 res_labels: numpy array of predicted labels res_scores: numpy array of predicted", "else (2 * precision * recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall)", "by Threshold for {MODEL} on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-',", "--verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import click import os import numpy as", "transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # PRECISION, RECALL, AND", "= os.path.join(PLOTS_PATH, f'{time_str}_labels_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf()", "20 for x in range(2, 19)) + \\ list((90 + x) / 100", "# NUMBER OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted", "Run MATCH with PeTaL data. Last modified on 10 August 2021. DESCRIPTION precision_and_recall.py", "and other statistics on graphs. Args: match_path (str): Path of MATCH folder. plots_path", "ts = [] f1s = [] topks = [] for threshold in tqdm(thresholds):", "for x in test_label]) f1 = 0 if (precision + recall) == 0", "1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as", "modified on 10 August 2021. 
DESCRIPTION precision_and_recall.py produces three plots from results in", "range(1, 10)) + \\ list(x / 20 for x in range(2, 19)) +", "RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score", "logging from collections import namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold", "threshold decreases from 1 to 0, precision goes down but recall goes up", "These three plots appear in plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which", "/ 1000 for x in range(1, 10)) + \\ list(x / 100 for", "threshold. As threshold decreases from 1 to 0, precision goes down but recall", "if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a plots directory", "1.0 if topk == 0 else np.mean([1 if x in test_label else 0", "default='PeTaL', help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose", "rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1) topks.append(stats.topk) ''' Make the following plots to assess the performance", "verbose): \"\"\"Plots precision and recall and other statistics on graphs. 
Args: match_path (str):", "PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\"", "recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions),", "now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" #", "recall) / (precision + recall) topks.append(topk) precisions.append(precision) recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall)", "{DATASET}.\") thresholds = list(x / 10000 for x in range(1, 10)) + \\", "already have a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\")", "plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prc_{MODEL}_{DATASET}.png')", "in res_label[:topk] else 0 for x in test_label]) f1 = 0 if (precision", "is saved as {PLOT_PATH}\") plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters:", "plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is", "PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## # NUMBER OF LABELS PREDICTED", "recall, and F1 scores are macro (averaged across examples, not labels) \"\"\" precisions", "(<EMAIL>, <EMAIL>) ''' import click import os import numpy as np import pandas", "recall, and F1 score vary as threshold varies from 0 to 1. 
OPTIONS", "rs = [] ts = [] f1s = [] topks = [] for", "{ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\"", "PRECISION, RECALL, AND F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1", "labels above threshold precision: average precision across examples recall: average recall across examples", "PRlogger = logging.getLogger(\"P&R\") DATASET = dataset MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True)", "\\ list((9990 + x) / 10000 for x in range(1, 10)) ps =", "from collections import namedtuple from tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk", "OF LABELS PREDICTED BY THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold", "precision-recall curve by varying the threshold. As threshold decreases from 1 to 0,", "linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0,", "[] ts = [] f1s = [] topks = [] for threshold in", "the following plots to assess the performance of the model. Precision-recall curve Precision,", "as threshold varies from 0 to 1. OPTIONS -m, --match PATH/TO/MATCH Path of", "plt.clf() def compute_stats(threshold, res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: float, 0.0 <", "a precision-recall curve by varying the threshold. 
As threshold decreases from 1 to", "F1 SCORE BY THRESHOLD ######################################## plt.grid() plt.title(f'Precision, Recall, and F1 Score by Threshold", "= [] topks = [] for threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels,", "plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH", "but recall goes up (because more labels get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png,", "recalls.append(recall) f1s.append(f1) # print(res_label[:topk], precision, recall) return Stats(threshold, np.mean(topks), np.mean(precisions), np.mean(recalls), np.mean(f1s)) if", "tqdm import tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match',", "f'{time_str}_prc_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## #", "######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}') plt.plot(ts,", "os.mkdir(ALL_PLOTS_PATH) else: if verbose: PRlogger.info(f\"You already have a plots directory at {ALL_PLOTS_PATH}.\") now", "= np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels = np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose:", "np.load(f\"{match_path}/{DATASET}/train_labels.npy\", allow_pickle=True) if verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on {DATASET}.\")", "10)) + \\ list((9990 + x) / 10000 for x in range(1, 10))", "plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-', label='F1 score') plt.xlabel('Threshold') plt.xlim(0, 1) 
plt.ylabel('Metrics')", "\"PeTaL\". -v, --verbose Enable verbosity. USAGE python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose", "+ \\ list((990 + x) / 1000 for x in range(1, 10)) +", "THRESHOLD ######################################## plt.grid() plt.title(f'Number of Labels Predicted by Threshold for {MODEL} on {DATASET}')", "from 1 to 0, precision goes down but recall goes up (because more", "10000 for x in range(1, 10)) ps = [] rs = [] ts", "MODEL = 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels =", "to assess the performance of the model. Precision-recall curve Precision, recall, and F1", "python3 precision_and_recall.py -m ../src/MATCH -p ../plots --verbose Authors: <NAME> (<EMAIL>, <EMAIL>) ''' import", "becomes the number of labels scoring above the threshold precision = 1.0 if", "'-p', 'plots_path', type=click.Path(exists=True), help='Path of plots folder.') @click.option('--dataset', '-d', 'dataset', default='PeTaL', help='Name of", "predicted by varying threshold ''' ALL_PLOTS_PATH = plots_path if not os.path.exists(ALL_PLOTS_PATH): os.mkdir(ALL_PLOTS_PATH) else:", "click import os import numpy as np import pandas as pd from matplotlib", "'-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.') @click.option('--plots', '-p', 'plots_path', type=click.Path(exists=True), help='Path of", "which plots how precision, recall, and F1 score vary as threshold varies from", "os import numpy as np import pandas as pd from matplotlib import pyplot", "= 'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\",", 
"plots/YYYYMMDD_precision_recall and are as follows: - HHMMSS_labels_MATCH_PeTaL.png, which varies threshold and plots number", "if verbose: PRlogger.info(f\"Computing statistics by varying threshold for {MODEL} on {DATASET}.\") thresholds =", "list((9990 + x) / 10000 for x in range(1, 10)) ps = []", "Make the following plots to assess the performance of the model. Precision-recall curve", "varying threshold for {MODEL} on {DATASET}.\") thresholds = list(x / 10000 for x", "topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path', type=click.Path(exists=True), help='Path of MATCH folder.')", "= np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True) train_labels =", "statistics by varying threshold for {MODEL} on {DATASET}.\") thresholds = list(x / 10000", "0.0 < threshold < 1.0 res_labels: numpy array of predicted labels res_scores: numpy", "in range(1, 10)) ps = [] rs = [] ts = [] f1s", "array of target labels Returns: Stats object containing threshold topk: average number of", "f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH, facecolor='w', transparent=False) PRlogger.info(f\"Your plot is saved as {PLOT_PATH}\") plt.clf() ######################################## #", "F1 score by varying threshold Numbers of labels predicted by varying threshold '''", "f1 = 0 if (precision + recall) == 0 else (2 * precision", "plots directory at {PLOTS_PATH}\") else: if verbose: PRlogger.info(f\"You already have a plots directory", "threshold < 1.0 res_labels: numpy array of predicted labels res_scores: numpy array of", "threshold) # topk becomes the number of labels scoring above the threshold precision", "res_labels, res_scores, test_labels): \"\"\" compute_stats(threshold) Parameters: threshold: 
float, 0.0 < threshold < 1.0", "recall goes up (because more labels get past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which", "past the threshold). - HHMMSS_prf1_MATCH_PeTaL.png, which plots how precision, recall, and F1 score", "with PeTaL data. Last modified on 10 August 2021. DESCRIPTION precision_and_recall.py produces three", "recall) == 0 else (2 * precision * recall) / (precision + recall)", "range(2, 19)) + \\ list((90 + x) / 100 for x in range(1,", "on {DATASET}') plt.plot(ts, ps, linestyle='-', label='Precision') plt.plot(ts, rs, linestyle='-', label='Recall') plt.plot(ts, f1s, linestyle='-',", "average F1 score across examples Note: precision, recall, and F1 scores are macro", "average precision across examples recall: average recall across examples f1: average F1 score", "as np import pandas as pd from matplotlib import pyplot as plt from", "for x in range(1, 10)) ps = [] rs = [] ts =", "= [] f1s = [] topks = [] for threshold in tqdm(thresholds): stats", "PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New plots directory", "numpy array of target labels Returns: Stats object containing threshold topk: average number", "tqdm Stats = namedtuple(\"Stats\", \"threshold topk precision recall f1\") @click.command() @click.option('--match', '-m', 'match_path',", "across examples f1: average F1 score across examples Note: precision, recall, and F1", "predicted labels res_scores: numpy array of predicted label scores test_labels: numpy array of", "in range(1, 10)) + \\ list(x / 100 for x in range(1, 10))", "score') plt.xlabel('Threshold') plt.xlim(0, 1) plt.ylabel('Metrics') plt.ylim(0, 1) plt.legend() PLOT_PATH = os.path.join(PLOTS_PATH, f'{time_str}_prf1_{MODEL}_{DATASET}.png') plt.savefig(fname=PLOT_PATH,", "the threshold precision = 1.0 if topk == 0 else np.mean([1 if x", "pandas as pd from matplotlib import pyplot as plt from datetime import 
datetime", "comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH)", "curve by varying the threshold. As threshold decreases from 1 to 0, precision", "threshold means fewer labels get past the threshold. - HHMMSS_prc_MATCH_PeTaL.png, which plots a", "plots how precision, recall, and F1 score vary as threshold varies from 0", "from datetime import datetime import logging from collections import namedtuple from tqdm import", "datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str = now.strftime(\"%H%M%S\") comment = f\"precision_recall\" # \"_on_{DATASET}\" PLOTS_PATH", "a plots directory at {ALL_PLOTS_PATH}.\") now = datetime.now() date_str = now.strftime(\"%Y%m%d\") time_str =", "'MATCH' res_labels = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-labels.npy\", allow_pickle=True) res_scores = np.load(f\"{match_path}/{DATASET}/results/{MODEL}-{DATASET}-scores.npy\", allow_pickle=True) test_labels = np.load(f\"{match_path}/{DATASET}/test_labels.npy\", allow_pickle=True)", "{DATASET}, varying threshold') plt.plot(ps, rs, linestyle='-') plt.xlabel('Recall') plt.xlim(0, 1) plt.ylabel('Precision') plt.ylim(0, 1) PLOT_PATH", "Returns: Stats object containing threshold topk: average number of labels above threshold precision:", "threshold in tqdm(thresholds): stats = compute_stats(threshold, res_labels, res_scores, test_labels) ps.append(stats.precision) rs.append(stats.recall) ts.append(threshold) f1s.append(stats.f1)", "of plots folder. verbose (bool): Verbose output. 
\"\"\" logging.basicConfig( level=logging.INFO, format=\"[%(asctime)s:%(name)s] %(message)s\" )", "topk: average number of labels above threshold precision: average precision across examples recall:", "import numpy as np import pandas as pd from matplotlib import pyplot as", "goes down but recall goes up (because more labels get past the threshold).", "for res_label, res_score, test_label in zip(res_labels, res_scores, test_labels): topk = np.argmax(res_score < threshold)", "np import pandas as pd from matplotlib import pyplot as plt from datetime", "help='Name of dataset, e.g., \"PeTaL\".') @click.option('--verbose', '-v', type=click.BOOL, is_flag=True, default=False, required=False, help='Verbose output.')", "in test_label else 0 for x in res_label[:topk]]) recall = np.mean([1 if x", "# \"_on_{DATASET}\" PLOTS_PATH = os.path.join(ALL_PLOTS_PATH, f\"{date_str}_{comment}\") if not os.path.exists(PLOTS_PATH): os.mkdir(PLOTS_PATH) if verbose: PRlogger.info(f\"New" ]
[ "####################데이터프레임의 문자열 컬럼들을 합치는 등의 작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas", "aaa 00001 # # 1 2 bbb 00002 # # 2 10 ccc", "# row_g1 0 1 2 3 4 # row_g2 5 6 7 8", "# # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # #", "9 11 13 # row_g2 25 27 29 31 33 msc = Series(mdc)", "# # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby", "00020 00020_ddd 20.00 DDD # # 4 100 eee 00100 00100_eee 100.00 EEE", "ddd 00020 00020_ddd 20.00 DDD # # 4 100 eee 00100 00100_eee 100.00", "eee 00100 00100_eee 100.00 EEE # # 5 200 fff 00200 00200_fff 200.00", "바꿔줌 # print(df) # # id name id_2 id_name id_3 name_3 # #", "200 fff 00200 # # # #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. #", "키로 자료를 밸류로 나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분.", "id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x:", "AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB # # 2", "5 6 7 8 9 # r3 10 11 12 13 14 #", "새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기", "# # # 7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777 # #", "00002 00002_bbb 2.00 BBB 2.00:BBB # # 2 10 ccc 00010 00010_ccc 10.00", "7 8 9 # row_g3 10 11 12 13 14 # row_g4 15", "# df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df) # # id name", "# r3 21 39 # r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나", "= {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. print(gbc.sum()) # col_g1", "00002_bbb 2.00 # # 2 10 ccc 00010 00010_ccc 10.00 # # 3", "name_3 id_name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA #", "{'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. 
print(gbc.sum()) # col_g1 col_g2", "#함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에", "id_name # # 0 1 aaa 00001 00001_aaa # # 1 2 bbb", "5 6 7 8 9 # row_g3 10 11 12 13 14 #", "1 9 # r2 11 24 # r3 21 39 # r4 31", "c2 c3 c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2", "sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) #", "# # 4 100 eee 00100 00100_eee 100.00 # # 5 200 fff", "100.00 EEE # # 5 200 fff 00200 00200_fff 200.00 FFF # #", "id_3 name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA # #", "axis = 1을주어야 한다. # col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc =", "# 3 20 ddd 00020 00020_ddd 20.00 # # 4 100 eee 00100", "# print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) # # # 3 #", "eee 00100 00100_eee # # 5 200 fff 00200 00200_fff # # #", "15.5 16.5 print(gbr.std()) # c1 c2 c3 c4 c5 # row_g1 3.535534 3.535534", "print(\"{:+.2f}\".format(x)) # # # -3.14 # # # # x=2.718 # # print(\"{:.0f}\".format(x))", "# # 3.14 # # # # print(\"{:+.2f}\".format(x)) # # # +3.14 #", "20 ddd 00020 00020_ddd # # 4 100 eee 00100 00100_eee # #", "00100_eee 100.00 EEE # # 5 200 fff 00200 00200_fff 200.00 FFF #", "# 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # # 2 10", "# 0 1 aaa 00001 00001_aaa 1.00 AAA # # 1 2 bbb", "00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # #", "2.5 3.5 4.5 5.5 6.5 # row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std())", "00020_ddd 20.00 DDD # # 4 100 eee 00100 00100_eee 100.00 EEE #", "5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df)", "5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 #", "c3 c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 
3.535534", "16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1", "# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df)", "NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df)", "200 fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로", "= df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id name id_2 id_name id_3", "= r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum()) #", "#축을 지정 하지 않으면 안됨 # print(df) # # id name id_2 id_name", "# c1 c2 c3 c4 c5 # r1 0 1 2 3 4", "00010_ccc 10.00 CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD #", "9 # r3 10 11 12 13 14 # r4 15 16 17", "4 # row_g2 5 6 7 8 9 # row_g3 10 11 12", "# 1 2 bbb 00002 00002_bbb 2.00 BBB # # 2 10 ccc", ") # # # 05 # # # # x=7777777777 # # print(\"{:0>5d}\".format(x))", "자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5 #", "# id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df)", "#위.. 
딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 # data= : 데이터를 넣고", "# # # # print(\"{:+.2f}\".format(x)) # # # +3.14 # # # #", "# # 4 100 eee 00100 NaN # # 5 200 fff 00200", "pandas as pd import numpy as np from pandas import DataFrame, Series #", "rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의 결과에 맞춰", "00100 00100_eee 100.00 EEE 100.00:EEE # # 5 200 fff 00200 00200_fff 200.00", "def rgf(x) : if x == 'r1' or x == 'r2': rg =", "200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌", "df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2 # # 0 1", "# # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) #", "반올림) # # # 3 # # # # x=3.147592 # # print(\"{:.2f}\".format(x))", "# # #name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨)", "# row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2 : 열단위", "df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id name id_2 id_name # #", "100 eee 00100 NaN # # 5 200 fff 00200 NaN # #", "0 1 2 3 4 # r2 5 6 7 8 9 #", "# x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 ) #", "6 7 8 9 # row_g3 10 11 12 13 14 # row_g4", "# # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 ) # # #", "# # 0 1 aaa 00001 00001_aaa 1.00 AAA # # 1 2", "2 10 ccc 00010 00010_ccc # # 3 20 ddd 00020 00020_ddd #", "df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id name id_2 id_name", "c4 c5 # row_g1 2.5 3.5 4.5 5.5 6.5 # row_g2 12.5 13.5", "r3 21 39 # r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr) #", "ddd 00020 # # 4 100 eee 00100 # # 5 200 fff", "or x == 'r2': rg = 'row_g1' else: rg = 'row_g2' return rg", "# 05 # # # # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가", "c3 c4 c5 # row_g1 0 1 2 3 4 # row_g2 5", "# # 5 200 fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x:", "# # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # # 1", "됨) # df['id_name'] = 
df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨 #", "# #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의 id 컬럼을", "c5 # r1 0 1 2 3 4 # r2 5 6 7", "이상은 무시=>원 형태 유지) # # # 7777777777 # # print(\"{:,}\".format(x)) # #", "5 200 fff 00200 00200_fff 200.00 FFF # # # # id_name_3 컬럼추가", "# # # 3.15 # # # # x=5 # # print(\"{:0>2d}\".format(x)) #", "00001 00001_aaa 1.00 AAA 1.00:AAA # # 1 2 bbb 00002 00002_bbb 2.00", "x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14 # # # # x=2.718", "=> 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # #", "ccc 00010 00010_ccc # # 3 20 ddd 00020 00020_ddd # # 4", "00010 00010_ccc # # 3 20 ddd 00020 00020_ddd # # 4 100", "import numpy as np from pandas import DataFrame, Series # df = pd.DataFrame({'id'", "더해진다.: sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum())", "x: ':'.join(x),axis=1) # print(df) # # id name id_2 id_name id_3 name_3 id_name_3", "00002 # # 2 10 ccc 00010 # # 3 20 ddd 00020", "2 10 ccc 00010 00010_ccc 10.00 # # 3 20 ddd 00020 00020_ddd", "1 2 bbb 00002 00002_bbb 2.00 BBB # # 2 10 ccc 00010", "pd import numpy as np from pandas import DataFrame, Series # df =", "3 20 ddd 00020 NaN # # 4 100 eee 00100 NaN #", "16 17 18 19 # row_g1 = r1+r2 : 행단위 계산으로 새로운 행", "df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id name id_2 id_name", "한다. 
# col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis", "정보가 x에 전달) def rgf(x) : if x == 'r1' or x ==", "name id_2 id_name # # 0 1 aaa 00001 NaN # # 1", "{'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1", "'row_g2'} # dic -> Series # Series를 이용한 그룹화 msr = Series(mdr) print(type(msr))", "00200_fff # # # #id를 소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda", "# 3.15 # # # # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가", "# row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3 c4", "# # 2 10 ccc 00010 # # 3 20 ddd 00020 #", "# 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # # 3 20", "print(\"{:.2%}\".format(x)) # # # 25.00% # # # # #name + id_2 :재구성", "c3 c4 c5 # r1 0 1 2 3 4 # r2 5", "정수를 출력하라(소수 점 첫째자리에서 반올림) # # # 3 # # # #", "df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. print(gbc.sum()) # col_g1 col_g2 # r1 1", "31 54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3':", "2 10 ccc 00010 NaN # # 3 20 ddd 00020 NaN #", "00020 # # 4 100 eee 00100 # # 5 200 fff 00200", "# # 0 1 aaa 00001 # # 1 2 bbb 00002 #", "'row_g2', 'r4': 'row_g2'} # dic -> Series # Series를 이용한 그룹화 msr =", "x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 ) # #", "# # # # x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00% #", "# # 3.15 # # # # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D:", "18 19 # row_g1 = r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의", "r3 21 39 # r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈", "대한 정보가 x에 전달) def rgf(x) : if x == 'r1' or x", "NaN # # 4 100 eee 00100 NaN # # 5 200 fff", "# # print(\"{:.2f}\".format(x)) # # # 3.14 # # # # print(\"{:+.2f}\".format(x)) #", "00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화 #위..", "Series # Series를 이용한 그룹화 
msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr)", "=> 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x))", "# x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14 # # # #", "나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id", "eee 00100 00100_eee 100.00 # # 5 200 fff 00200 00200_fff 200.00 #", "# # 2 10 ccc 00010 00010_ccc 10.00 # # 3 20 ddd", "# # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df) # # id", "00020 00020_ddd 20.00 # # 4 100 eee 00100 00100_eee 100.00 # #", "#upper() : 대문자로 바꿔줌 # print(df) # # id name id_2 id_name id_3", "# 2 10 ccc 00010 00010_ccc 10.00 CCC # # 3 20 ddd", "row_g1 # r2 row_g1 # r3 row_g2 # r4 row_g2 # dtype: object", "== 'r2': rg = 'row_g1' else: rg = 'row_g2' return rg # 딕셔너리나", "# 1 2 bbb 00002 # # 2 10 ccc 00010 # #", "2 bbb 00002 NaN # # 2 10 ccc 00010 NaN # #", "# 1 2 bbb 00002 NaN # # 2 10 ccc 00010 NaN", "c1 c2 c3 c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 #", "= r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3", "20.00 DDD # # 4 100 eee 00100 00100_eee 100.00 EEE # #", "# # 1 2 bbb 00002 00002_bbb 2.00 # # 2 10 ccc", "r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1',", "4 100 eee 00100 # # 5 200 fff 00200 # # #", "# # # +3.14 # # # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) #", "# 5 200 fff 00200 00200_fff 200.00 FFF # # # # id_name_3", "00002 00002_bbb # # 2 10 ccc 00010 00010_ccc # # 3 20", "df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id name id_2 id_name id_3 name_3", "3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2", "#id를 소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df)", "c4 c5 # row_g1 5 7 9 11 13 # row_g2 25 27", "100 
eee 00100 00100_eee 100.00 EEE # # 5 200 fff 00200 00200_fff", "'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series # Series를 이용한", "col_g1 col_g2 # r1 1 9 # r2 11 24 # r3 21", "x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) # # id name id_2 id_name", "row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2 c3", "5 7 9 11 13 # row_g2 25 27 29 31 33 msc", "+3.14 # # # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14", "# # # 25.00% # # # # #name + id_2 :재구성 =>", "1 aaa 00001 00001_aaa 1.00 # # 1 2 bbb 00002 00002_bbb 2.00", "<class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} #", "00001_aaa 1.00 AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB #", "df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 0 1 2", "생성####################################### #이용함수 apply import pandas as pd import numpy as np from pandas", "00020 00020_ddd # # 4 100 eee 00100 00100_eee # # 5 200", "00100 00100_eee 100.00 EEE # # 5 200 fff 00200 00200_fff 200.00 FFF", "# # id name id_2 id_name # # 0 1 aaa 00001 NaN", "# id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda", "대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x) : if x", "print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지) #", "2.00:BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # #", "# # 7,777,777,777 # # # # x=0.25 # # print(\"{:.2%}\".format(x)) # #", "# 3 20 ddd 00020 NaN # # 4 100 eee 00100 NaN", "25 27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 #", "x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14 # # # # print(\"{:+.2f}\".format(x))", "21 39 # r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신", "# id name id_2 id_name id_3 # # 0 1 aaa 00001 00001_aaa", "# # # print(\"{:+.2f}\".format(x)) # # # +3.14 # # # # x=-3.141592", "# ################################################################################################################### #groupby 집계함수 
# 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로 만들어서 열을 키로", ".2f(소수 점 셋째자리에서 반올림) # # # 3.15 # # # # x=5", "3 20 ddd 00020 00020_ddd # # 4 100 eee 00100 00100_eee #", "mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5", "# \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운", "id name id_2 # # 0 1 aaa 00001 # # 1 2", "# # # x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00% # #", "1 aaa 00001 00001_aaa # # 1 2 bbb 00002 00002_bbb # #", "'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series # Series를 이용한 그룹화", "else: rg = 'row_g2' return rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여", "0으로 채워라 ,너비 이상은 무시=>원 형태 유지) # # # 7777777777 # #", "데이터를 넣고 컬럼과 인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2", "인자의 모양을 바꿔준다. # # # # x=3.141592 # # print(\"{:.2f}\".format(x)) # #", "21 39 # r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1':", "row_g1 = r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum())", "r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4", "row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2", "1 2 bbb 00002 # # 2 10 ccc 00010 # # 3", "# 2 10 ccc 00010 NaN # # 3 20 ddd 00020 NaN", "#축을 1로 지정 # print(df) # # id name id_2 id_name # #", "# 3 20 ddd 00020 00020_ddd # # 4 100 eee 00100 00100_eee", "# <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'}", "# r2 11 24 # r3 21 39 # r4 31 54 #함수를", "print(\"{:.2f}\".format(x)) # # # 3.14 # # # # print(\"{:+.2f}\".format(x)) # # #", "= {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 #", "11 13 # row_g2 25 27 29 31 33 print(gbr.mean()) # c1 c2", "rg = 'row_g2' return rg # 딕셔너리나 시리즈의 모습에 
맞춰서 그룹화 하여 그룹화", "00200 00200_fff 200.00 FFF # # # # id_name_3 컬럼추가 # # id_name_3", "100.00 # # 5 200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper())", "# # id name id_2 id_name id_3 name_3 # # 0 1 aaa", "# 3.14 # # # # print(\"{:+.2f}\".format(x)) # # # +3.14 # #", "aaa 00001 00001_aaa 1.00 # # 1 2 bbb 00002 00002_bbb 2.00 #", "1 2 bbb 00002 00002_bbb 2.00 # # 2 10 ccc 00010 00010_ccc", "5 7 9 11 13 # row_g2 25 27 29 31 33 print(gbr.mean())", "# row_g1 5 7 9 11 13 # row_g2 25 27 29 31", "as np from pandas import DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200],", "print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1 # r3", "bbb 00002 00002_bbb 2.00 BBB # # 2 10 ccc 00010 00010_ccc 10.00", "id_3 name_3 id_name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA", "CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD # # 4", "# 4 100 eee 00100 # # 5 200 fff 00200 # #", "'_'.join(x),axis=1) #축을 1로 지정 # print(df) # # id name id_2 id_name #", "00001_aaa 1.00 # # 1 2 bbb 00002 00002_bbb 2.00 # # 2", "# # 4 100 eee 00100 00100_eee # # 5 200 fff 00200", "# 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수", "00001_aaa 1.00 AAA 1.00:AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB", "fff 00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서", "18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3", "전달) def rgf(x) : if x == 'r1' or x == 'r2': rg", "0 1 aaa 00001 # # 1 2 bbb 00002 # # 2", "eee 00100 NaN # # 5 200 fff 00200 NaN # # df['id_name']", "대문자로 바꿔줌 # print(df) # # id name id_2 id_name id_3 name_3 #", "200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 
# print(df) # #", "3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # # 4 100 eee", "00100_eee 100.00 EEE 100.00:EEE # # 5 200 fff 00200 00200_fff 200.00 FFF", "row_g1 0 1 2 3 4 # row_g2 5 6 7 8 9", "12 13 14 # row_g4 15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'}", "print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9 # r2 11 24 #", "계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis = 1을주어야", "# 3 20 ddd 00020 00020_ddd 20.00 DDD # # 4 100 eee", "5 200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로", "# row_g1 2.5 3.5 4.5 5.5 6.5 # row_g2 12.5 13.5 14.5 15.5", "# 7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777 # # # #", "# 3 # # # # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점", "# 5 200 fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)", "# 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의 결과에 맞춰 데이터프레임", "성분이 더해진다.: sum()) : 꼭 axis = 1을주어야 한다. # col_g2 = c3+c4+c5", "00200_fff 200.00 FFF # # # # id_name_3 컬럼추가 # # id_name_3 =>", "#꼭 axis = 1을주어야 한다. 
print(gbc.sum()) # col_g1 col_g2 # r1 1 9", "0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지) # # #", "3.535534 # col_g1 = c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의 성분이", "# # # 7,777,777,777 # # # # x=0.25 # # print(\"{:.2%}\".format(x)) #", "# print(\"{:+.2f}\".format(x)) # # # -3.14 # # # # x=2.718 # #", "00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화", "열을 키로 자료를 밸류로 나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로 자료를", "시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의 결과에 맞춰 데이터프레임 생성 print(df.groupby(rgf).sum())", "# # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14 # #", "# print(\"{:,}\".format(x)) # # # 7,777,777,777 # # # # x=0.25 # #", "열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id name id_2", "00002 NaN # # 2 10 ccc 00010 NaN # # 3 20", "# # 7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777 # # #", "'row_g2' return rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의", "fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 #", "1 2 3 4 # r2 5 6 7 8 9 # r3", "# print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을", "33 print(gbr.mean()) # c1 c2 c3 c4 c5 # row_g1 2.5 3.5 4.5", "2 3 4 # r2 5 6 7 8 9 # r3 10", "# print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 ) # # # 05", "row_g1 5 7 9 11 13 # row_g2 25 27 29 31 33", "# c1 c2 c3 c4 c5 # row_g1 2.5 3.5 4.5 5.5 6.5", "17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2", "지정 하지 않으면 안됨 # print(df) # # id name id_2 id_name #", "100 eee 00100 00100_eee # # 5 200 fff 00200 00200_fff # #", "10 11 12 13 14 # r4 15 16 17 18 19 #", "id_2 id_name # # 0 1 aaa 00001 00001_aaa # # 1 2", "print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # # # 3.15 # # #", "# id name id_2 id_name id_3 name_3 # # 0 1 aaa 00001", "200 fff 00200 00200_fff 200.00 FFF # # # # id_name_3 
컬럼추가 #", "NaN # # 2 10 ccc 00010 NaN # # 3 20 ddd", "# # print(\"{:+.2f}\".format(x)) # # # +3.14 # # # # x=-3.141592 #", "':'.join(x),axis=1) # print(df) # # id name id_2 id_name id_3 name_3 id_name_3 #", "id name id_2 id_name # # 0 1 aaa 00001 NaN # #", "# 4 100 eee 00100 00100_eee # # 5 200 fff 00200 00200_fff", "1로 지정 # print(df) # # id name id_2 id_name # # 0", "print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 5 7 9 11", "5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지) # # # 7777777777 #", "df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) # # id", "2 10 ccc 00010 00010_ccc 10.00 CCC # # 3 20 ddd 00020", "r2 11 24 # r3 21 39 # r4 31 54 print(type(mdr)) #", "ddd 00020 00020_ddd 20.00 # # 4 100 eee 00100 00100_eee 100.00 #", "# # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) # # # 3", "2 bbb 00002 00002_bbb # # 2 10 ccc 00010 00010_ccc # #", "00002 00002_bbb 2.00 # # 2 10 ccc 00010 00010_ccc 10.00 # #", "import DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df)", "# dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2 c3 c4", "gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. print(gbc.sum()) # col_g1 col_g2 #", "# 2 10 ccc 00010 00010_ccc 10.00 # # 3 20 ddd 00020", ": 꼭 axis = 1을주어야 한다. 
# col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'}", "r1 0 1 2 3 4 # r2 5 6 7 8 9", "# 0 1 aaa 00001 # # 1 2 bbb 00002 # #", "ccc 00010 00010_ccc 10.00 # # 3 20 ddd 00020 00020_ddd 20.00 #", "x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) # # #", "첫째자리에서 반올림) # # # 3 # # # # x=3.147592 # #", "10.00 CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD # #", "bbb 00002 NaN # # 2 10 ccc 00010 NaN # # 3", "합치는 등의 작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas as pd import", "# row_g2 25 27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1", "axis = 1을주어야 한다. print(gbc.sum()) # col_g1 col_g2 # r1 1 9 #", "id name id_2 id_name id_3 name_3 id_name_3 # # 0 1 aaa 00001", "딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의 결과에 맞춰 데이터프레임 생성", "9 # row_g3 10 11 12 13 14 # row_g4 15 16 17", "3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2 : 열단위 계산으로 새로운 열", "print(df) # # id name id_2 # # 0 1 aaa 00001 #", "# # x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00% # # #", "# # # 05 # # # # x=7777777777 # # print(\"{:0>5d}\".format(x)) #", "10.00 # # 3 20 ddd 00020 00020_ddd 20.00 # # 4 100", "# 3 20 ddd 00020 # # 4 100 eee 00100 # #", "row_g2 25 27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2", "같은 결과 # c1 c2 c3 c4 c5 # row_g1 5 7 9", "1을주어야 한다. 
# col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭", "row_g3 10 11 12 13 14 # row_g4 15 16 17 18 19", "# # 3 20 ddd 00020 00020_ddd # # 4 100 eee 00100", ": [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성", "2 bbb 00002 00002_bbb 2.00 BBB # # 2 10 ccc 00010 00010_ccc", "+ id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] =", "채워라 ) # # # 05 # # # # x=7777777777 # #", "id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda", "1.00:AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # #", "# # # 3 # # # # x=3.147592 # # print(\"{:.2f}\".format(x)) #", "'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series # Series를", "id name id_2 id_name id_3 # # 0 1 aaa 00001 00001_aaa 1.00", "자료를 밸류로 나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분. df", "10 ccc 00010 00010_ccc 10.00 CCC # # 3 20 ddd 00020 00020_ddd", "print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 ) # # # 05 #", "# # 4 100 eee 00100 00100_eee 100.00 EEE # # 5 200", "나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4'])", "= 1을주어야 한다. 
print(gbc.sum()) # col_g1 col_g2 # r1 1 9 # r2", "# 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지) # #", "5.5 6.5 # row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2", "셋째자리에서 반올림) # # # 3.15 # # # # x=5 # #", "05 # # # # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5,", "00010 00010_ccc 10.00 # # 3 20 ddd 00020 00020_ddd 20.00 # #", "자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda", "위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) #", "4 100 eee 00100 00100_eee # # 5 200 fff 00200 00200_fff #", "54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2',", "4 100 eee 00100 00100_eee 100.00 EEE # # 5 200 fff 00200", "14 # row_g4 15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr =", "# # # # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림)", "컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정", "# print(\"{:.2f}\".format(x)) # # # 3.14 # # # # print(\"{:+.2f}\".format(x)) # #", "= pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운", "# # # # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서", "# id name id_2 id_name # # 0 1 aaa 00001 NaN #", "DDD # # 4 100 eee 00100 00100_eee 100.00 EEE # # 5", "넣고 컬럼과 인덱스로 자료를 구분. 
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3", "선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x) : if x ==", "# print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # # # 3.15 # #", "# 4 100 eee 00100 00100_eee 100.00 EEE # # 5 200 fff", "# # # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라", "3.535534 3.535534 3.535534 # col_g1 = c1+c2 : 열단위 계산으로 새로운 열 생성(같은", "# #name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) #", "# # id name id_2 # # 0 1 aaa 00001 # #", "df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) # # id name id_2", "# 정수를 출력하라(소수 점 첫째자리에서 반올림) # # # 3 # # #", "딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x) :", "c3 c4 c5 # row_g1 5 7 9 11 13 # row_g2 25", "# 2 10 ccc 00010 # # 3 20 ddd 00020 # #", "rgf(x) : if x == 'r1' or x == 'r2': rg = 'row_g1'", "생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis = 1을주어야 한다. # col_g2", "문자열 컬럼들을 합치는 등의 작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas as", "13 # row_g2 25 27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) #", "무시=>원 형태 유지) # # # 7777777777 # # print(\"{:,}\".format(x)) # # #", "#새로운 id 컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는", "print(gbr.std()) # c1 c2 c3 c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534", "00200 # # # #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. 
# # #", "너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지) # # # 7777777777", "하지 않으면 안됨 # print(df) # # id name id_2 id_name # #", "# # # # id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA # #", "# id name id_2 id_name id_3 name_3 id_name_3 # # 0 1 aaa", "00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df)", "컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을", "msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2", "그룹화(df에 대한 정보가 x에 전달) def rgf(x) : if x == 'r1' or", "00010_ccc 10.00 CCC 10.00:CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD", "c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534", "# r4 15 16 17 18 19 # row_g1 = r1+r2 : 행단위", ": if x == 'r1' or x == 'r2': rg = 'row_g1' else:", "x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df) # # id name id_2 id_name", "9 11 13 # row_g2 25 27 29 31 33 print(gbr.mean()) # c1", "df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df) # # id", "import pandas as pd import numpy as np from pandas import DataFrame, Series", "00100 00100_eee # # 5 200 fff 00200 00200_fff # # # #id를", "형식으로 ()안의 인자의 모양을 바꿔준다. # # # # x=3.141592 # # print(\"{:.2f}\".format(x))", "r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에", "# # #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의 id", "# # # -3.14 # # # # x=2.718 # # print(\"{:.0f}\".format(x)) #", "한다. 
print(gbc.sum()) # col_g1 col_g2 # r1 1 9 # r2 11 24", "c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭", "20 ddd 00020 00020_ddd 20.00 # # 4 100 eee 00100 00100_eee 100.00", "200 fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df)", "12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3 c4 c5 #", "20.00 DDD 20.00:DDD # # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE", "#이용함수 apply import pandas as pd import numpy as np from pandas import", "# -3.14 # # # # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수", "# # 5 200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper()", "ccc 00010 # # 3 20 ddd 00020 # # 4 100 eee", "# <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1 # r3 row_g2", "c4 c5 # r1 0 1 2 3 4 # r2 5 6", "# print(\"{:.2%}\".format(x)) # # # 25.00% # # # # #name + id_2", "#name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name']", "bbb 00002 00002_bbb # # 2 10 ccc 00010 00010_ccc # # 3", "00001 00001_aaa 1.00 # # 1 2 bbb 00002 00002_bbb 2.00 # #", "1 aaa 00001 # # 1 2 bbb 00002 # # 2 10", "Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # #", "# # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # # # 3.15 #", "name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA # # 1", "00010 # # 3 20 ddd 00020 # # 4 100 eee 00100", "# # id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA # # df['id_name_3'] =", "딕셔너리와 같은 결과 # c1 c2 c3 c4 c5 # row_g1 5 7", "if x == 'r1' or x == 'r2': rg = 'row_g1' else: rg", "'r1' or x == 'r2': rg = 'row_g1' else: rg = 'row_g2' return", "# r3 21 39 # r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr)", "컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨", "'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1 # r3 row_g2 # r4", "새로운 열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis = 1을주어야 한다.", "# #새로운 id 컬럼을 
원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한", "{'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1", "# # print(\"{:.2%}\".format(x)) # # # 25.00% # # # # #name +", "# r1 0 1 2 3 4 # r2 5 6 7 8", "6.5 # row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3", "3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 #", "id 컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에", "'r4': 'row_g2'} # dic -> Series # Series를 이용한 그룹화 msr = Series(mdr)", "행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'}", "id_2 id_name id_3 # # 0 1 aaa 00001 00001_aaa 1.00 # #", "# 25.00% # # # # #name + id_2 :재구성 => 두개의 컬럼이", "바꿔준다. # # # # x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14", "print(gbr.mean()) # c1 c2 c3 c4 c5 # row_g1 2.5 3.5 4.5 5.5", "열의 성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr =", "r1 1 9 # r2 11 24 # r3 21 39 # r4", "# print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태 유지)", "# 7,777,777,777 # # # # x=0.25 # # print(\"{:.2%}\".format(x)) # # #", "x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2 # # 0 1 aaa", "4.5 5.5 6.5 # row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1", "= c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다.", "00020_ddd # # 4 100 eee 00100 00100_eee # # 5 200 fff", "# {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series", "df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id name id_2 id_name id_3 #", "print(df) # # id name id_2 id_name id_3 # # 0 1 aaa", "c2 c3 c4 c5 # row_g1 2.5 3.5 4.5 5.5 6.5 # row_g2", "# 5 200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() :", "c3 c4 c5 # row_g1 2.5 3.5 4.5 5.5 
6.5 # row_g2 12.5", "# # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라 )", "00002_bbb 2.00 BBB 2.00:BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC", "id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) #", "{'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series #", "#컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의 id 컬럼을 기준으로", "1 aaa 00001 NaN # # 1 2 bbb 00002 NaN # #", "ccc 00010 NaN # # 3 20 ddd 00020 NaN # # 4", "# r2 row_g1 # r3 row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum())", "# # 2 10 ccc 00010 00010_ccc # # 3 20 ddd 00020", "00200 00200_fff # # # #id를 소숫점 이하로 나타내는 새로운 열을 추가 #", "00001 NaN # # 1 2 bbb 00002 NaN # # 2 10", "0 1 2 3 4 # row_g2 5 6 7 8 9 #", "넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2 #", "00001 00001_aaa 1.00 AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB", "# # 3 20 ddd 00020 # # 4 100 eee 00100 #", "ddd 00020 NaN # # 4 100 eee 00100 NaN # # 5", "name id_2 id_name id_3 name_3 id_name_3 # # 0 1 aaa 00001 00001_aaa", "# 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # # 4 100", "mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. 
print(gbc.sum()) #", "20 ddd 00020 NaN # # 4 100 eee 00100 NaN # #", "컬럼을 생성 # #새로운 id 컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해", "r2 5 6 7 8 9 # r3 10 11 12 13 14", "00010 00010_ccc 10.00 CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD", "원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워", "# 0>2D(D: 너비가 2, 0으로 채워라 ) # # # 05 # #", "aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # # 1 2 bbb 00002 00002_bbb", "# # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # # 3", "# print(\"{:+.2f}\".format(x)) # # # +3.14 # # # # x=-3.141592 # #", "BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC # # 3", "15 16 17 18 19 # row_g1 = r1+r2 : 행단위 계산으로 새로운", "생성(같은 열의 성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr", "6 7 8 9 # r3 10 11 12 13 14 # r4", "# row_g2 25 27 29 31 33 print(gbr.mean()) # c1 c2 c3 c4", "그룹화 #위.. 딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 # data= : 데이터를", ": 대문자로 바꿔줌 # print(df) # # id name id_2 id_name id_3 name_3", "# +3.14 # # # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # #", "# # id name id_2 id_name id_3 name_3 id_name_3 # # 0 1", "Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1 #", "00001 # # 1 2 bbb 00002 # # 2 10 ccc 00010", "x == 'r2': rg = 'row_g1' else: rg = 'row_g2' return rg #", "기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 #", "ddd 00020 00020_ddd # # 4 100 eee 00100 00100_eee # # 5", "00010 00010_ccc 10.00 CCC 10.00:CCC # # 3 20 ddd 00020 00020_ddd 20.00", "c4 c5 # row_g1 0 1 2 3 4 # row_g2 5 6", "# Series를 이용한 그룹화 msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) #", "# # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id name", "성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr)", "NaN # # 1 2 bbb 00002 NaN # # 2 10 ccc", "= {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) 
print(gbr.sum()) # c1 c2 c3 c4 c5 #", "7 9 11 13 # row_g2 25 27 29 31 33 msc =", "# # # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14 #", "fff 00200 # # # #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. # #", ": 행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2 =", "row_g2 25 27 29 31 33 print(gbr.mean()) # c1 c2 c3 c4 c5", "print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2 c3 c4 c5 # row_g1", "# # id name id_2 id_name id_3 # # 0 1 aaa 00001", "12 13 14 # r4 15 16 17 18 19 # row_g1 =", "# 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 #", "-> Series # Series를 이용한 그룹화 msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'>", "결과 # c1 c2 c3 c4 c5 # row_g1 5 7 9 11", "row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2 : 열단위 계산으로", "5 200 fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을", "# x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은", "행의 성분이 더해진다.: sum()) : 꼭 axis = 1을주어야 한다. # col_g2 =", "c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. print(gbc.sum())", "3 20 ddd 00020 00020_ddd 20.00 # # 4 100 eee 00100 00100_eee", "1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # # 1 2 bbb 00002", "9 # r2 11 24 # r3 21 39 # r4 31 54", "# print(df) # # id name id_2 # # 0 1 aaa 00001", "\"{:.2f}\".format(x)) # print(df) # # id name id_2 id_name id_3 # # 0", "20.00 # # 4 100 eee 00100 00100_eee 100.00 # # 5 200", "8 9 # row_g3 10 11 12 13 14 # row_g4 15 16", "bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # # 2 10 ccc 00010 00010_ccc", "# #id를 소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) #", "id_name id_3 name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA #", "# # 2 10 ccc 00010 NaN # # 3 20 ddd 00020", "이용해서 그룹화 #위.. 
딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 # data= :", "# row_g2 = r3+r4 mdr = {'r1':'row_g1','r2':'row_g2','r3':'row_g3','r4':'row_g4'} gbr = df.groupby(mdr) print(gbr.sum()) # c1", "10 ccc 00010 # # 3 20 ddd 00020 # # 4 100", "15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) #", "점 첫째자리에서 반올림) # # # 3 # # # # x=3.147592 #", "fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) #", "00100_eee # # 5 200 fff 00200 00200_fff # # # #id를 소숫점", "c2 c3 c4 c5 # row_g1 0 1 2 3 4 # row_g2", "eee 00100 00100_eee 100.00 EEE 100.00:EEE # # 5 200 fff 00200 00200_fff", "df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5 # r1 0", "그룹화 msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 #", "시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x) : if", "# row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534", "np from pandas import DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], #", "# # 4 100 eee 00100 # # 5 200 fff 00200 #", "3.535534 3.535534 # col_g1 = c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의", "3.14 # # # # print(\"{:+.2f}\".format(x)) # # # +3.14 # # #", "출력하라(소수 점 첫째자리에서 반올림) # # # 3 # # # # x=3.147592", "# 0 1 aaa 00001 NaN # # 1 2 bbb 00002 NaN", "= df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) # # id name", "4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE # # 5 200 fff", "점 셋째자리에서 반올림) # # # 3.15 # # # # x=5 #", "= 'row_g2' return rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산", "11 12 13 14 # row_g4 15 16 17 18 19 mdr =", "2, 0으로 채워라 ) # # # 05 # # # # x=7777777777", "c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534", "x: \"{:.2f}\".format(x)) # print(df) # # id name id_2 id_name id_3 # #", "# # #format():앞자리의 형식으로 ()안의 
인자의 모양을 바꿔준다. # # # # x=3.141592", "# row_g3 10 11 12 13 14 # row_g4 15 16 17 18", "r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2", "# # # #id를 소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x:", "print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4':", "# 딕셔너리와 같은 결과 # c1 c2 c3 c4 c5 # row_g1 5", "# row_g2 5 6 7 8 9 # row_g3 10 11 12 13", "3 20 ddd 00020 # # 4 100 eee 00100 # # 5", "0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name", "df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 5 7 9", "만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2 # #", "# .2f(소수 점 셋째자리에서 반올림) # # # 3.15 # # # #", "# # 0 1 aaa 00001 NaN # # 1 2 bbb 00002", "id_2 id_name id_3 name_3 id_name_3 # # 0 1 aaa 00001 00001_aaa 1.00", "# 1 2 bbb 00002 00002_bbb 2.00 # # 2 10 ccc 00010", "print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의", "# df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id name id_2 id_name id_3", "'_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df) # # id name id_2", "# # # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림)", "# # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # # 2", "row_g2 5 6 7 8 9 # row_g3 10 11 12 13 14", "AAA 1.00:AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB #", "00100_eee # # 5 200 fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda", "00100 00100_eee 100.00 # # 5 200 fff 00200 00200_fff 200.00 # #", "않으면 안됨 # print(df) # # id name id_2 id_name # # 0", "id_2 id_name # # 0 1 aaa 00001 NaN # # 1 2", "# r1 1 9 # r2 11 24 # r3 21 39 #", "33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9 #", "dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2 c3 c4 c5", "name id_2 # # 0 1 aaa 00001 # # 1 2 bbb", "BBB 2.00:BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC #", "29 31 33 
print(gbr.mean()) # c1 c2 c3 c4 c5 # row_g1 2.5", "# # # x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14 # #", "00010_ccc # # 3 20 ddd 00020 00020_ddd # # 4 100 eee", "100.00 EEE 100.00:EEE # # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF", "eee 00100 # # 5 200 fff 00200 # # # #format():앞자리의 형식으로", "딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 # data= : 데이터를 넣고 컬럼과", "24 # r3 21 39 # r4 31 54 print(type(mdr)) # <class 'dict'>", "x에 전달) def rgf(x) : if x == 'r1' or x == 'r2':", "sum()) : 꼭 axis = 1을주어야 한다. # col_g2 = c3+c4+c5 mdc =", "00100 NaN # # 5 200 fff 00200 NaN # # df['id_name'] =", "fff 00200 00200_fff # # # #id를 소숫점 이하로 나타내는 새로운 열을 추가", "# 4 100 eee 00100 NaN # # 5 200 fff 00200 NaN", "# c1 c2 c3 c4 c5 # row_g1 5 7 9 11 13", "row_g1 3.535534 3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534", "31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한", "100 eee 00100 00100_eee 100.00 EEE 100.00:EEE # # 5 200 fff 00200", "# # -3.14 # # # # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를", "id name id_2 id_name # # 0 1 aaa 00001 00001_aaa # #", "더해진다.: sum()) : 꼭 axis = 1을주어야 한다. 
# col_g2 = c3+c4+c5 mdc", "# # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE # # 5", "3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2 : 열단위 계산으로 새로운", "id_name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # #", "# print(df) # # id name id_2 id_name id_3 name_3 id_name_3 # #", "4 # r2 5 6 7 8 9 # r3 10 11 12", "# x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) # #", "추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id name id_2 id_name", "0으로 채워라 ) # # # 05 # # # # x=7777777777 #", "# # 0 1 aaa 00001 00001_aaa 1.00 # # 1 2 bbb", "25 27 29 31 33 print(gbr.mean()) # c1 c2 c3 c4 c5 #", "c5 # row_g1 2.5 3.5 4.5 5.5 6.5 # row_g2 12.5 13.5 14.5", "# # #id를 소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x))", "11 24 # r3 21 39 # r4 31 54 #함수를 이용한 그룹화", "numpy as np from pandas import DataFrame, Series # df = pd.DataFrame({'id' :", "= Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9 # r2 11", "# # # #name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이", "# r3 10 11 12 13 14 # r4 15 16 17 18", "bbb 00002 00002_bbb 2.00 # # 2 10 ccc 00010 00010_ccc 10.00 #", "# # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id name", "'row_g1' else: rg = 'row_g2' return rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화", "8 9 # r3 10 11 12 13 14 # r4 15 16", "r3 10 11 12 13 14 # r4 15 16 17 18 19", "# r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1', 'r2':", "3 4 # r2 5 6 7 8 9 # r3 10 11", "\"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성 # #새로운 id", "생성 # #새로운 id 컬럼을 원래의 id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로", "c1 c2 c3 c4 c5 # row_g1 5 7 9 11 13 #", "#groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 
딕셔너리로 만들어서 열을 키로 자료를 밸류로", "00020_ddd 20.00 # # 4 100 eee 00100 00100_eee 100.00 # # 5", "그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def", "# # x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14 # # #", "# # 1 2 bbb 00002 # # 2 10 ccc 00010 #", "안됨 # print(df) # # id name id_2 id_name # # 0 1", "2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # # 2 10 ccc 00010", "print(\"{:,}\".format(x)) # # # 7,777,777,777 # # # # x=0.25 # # print(\"{:.2%}\".format(x))", "df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df) # # id name id_2", "gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 5", "반올림) # # # 3.15 # # # # x=5 # # print(\"{:0>2d}\".format(x))", "200 fff 00200 00200_fff 200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를", "13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3 c4 c5 # row_g1", "3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 = c1+c2 :", "'r3': 'row_g2', 'r4': 'row_g2'} # dic -> Series # Series를 이용한 그룹화 msr", "2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면", "10.00:CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # #", "1.00 AAA 1.00:AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB", "200.00 FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로", "print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점 첫째자리에서 반올림) # # # 3 # #", "################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 
딕셔너리로 만들어서 열을 키로 자료를", "EEE # # 5 200 fff 00200 00200_fff 200.00 FFF # # #", "14 # r4 15 16 17 18 19 # row_g1 = r1+r2 :", "FFF # # # # id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA #", "채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2", "3 20 ddd 00020 00020_ddd 20.00 DDD # # 4 100 eee 00100", "row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 #", "00001 00001_aaa # # 1 2 bbb 00002 00002_bbb # # 2 10", "# 5 200 fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1)", "29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1", "맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x))", "1 2 bbb 00002 NaN # # 2 10 ccc 00010 NaN #", "rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x) : if x == 'r1'", "00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda x:x.upper()) #upper() : 대문자로 바꿔줌 # print(df) #", "인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5", "00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id", "c2 c3 c4 c5 # row_g1 5 7 9 11 13 # row_g2", "bbb 00002 # # 2 10 ccc 00010 # # 3 20 ddd", "# df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id name id_2", "# data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df)", "새로운 컬럼 생성####################################### #이용함수 apply import pandas as pd import numpy as np", "# # # #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. # # # #", "()안의 인자의 모양을 바꿔준다. 
# # # # x=3.141592 # # print(\"{:.2f}\".format(x)) #", "# print(df) # # id name id_2 id_name id_3 # # 0 1", "id 컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며", "7,777,777,777 # # # # x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00%", "# 0 1 aaa 00001 00001_aaa # # 1 2 bbb 00002 00002_bbb", "# 0 1 aaa 00001 00001_aaa 1.00 # # 1 2 bbb 00002", "39 # r4 31 54 print(type(mdr)) # <class 'dict'> print(mdr) # {'r1': 'row_g1',", "이용한 그룹화 msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1", "fff 00200 00200_fff 200.00 FFF # # # # id_name_3 컬럼추가 # #", "dic -> Series # Series를 이용한 그룹화 msr = Series(mdr) print(type(msr)) # <class", "앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id", "id_2 id_name id_3 name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA", "컬럼추가 # # id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1)", "31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9", "pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운 컬럼을", "이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달)", "eee 00100 00100_eee # # 5 200 fff 00200 00200_fff # # df['id_name']", "00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 #", "지정 # print(df) # # id name id_2 id_name # # 0 1", "100.00:EEE # # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF # ###################################################################################################################", "1 2 bbb 00002 00002_bbb 2.00 BBB 2.00:BBB # # 2 10 ccc", "# col_g1 = c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.:", "# #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. 
# # # # x=3.141592 #", "row_g1 # r3 row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와", "# id name id_2 # # 0 1 aaa 00001 # # 1", "17 18 19 # row_g1 = r1+r2 : 행단위 계산으로 새로운 행 생성(같은", "27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1", "0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # # 1 2 bbb", "# col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis =", "컬럼들을 합치는 등의 작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas as pd", "27 29 31 33 print(gbr.mean()) # c1 c2 c3 c4 c5 # row_g1", "name id_2 id_name id_3 name_3 # # 0 1 aaa 00001 00001_aaa 1.00", "x == 'r1' or x == 'r2': rg = 'row_g1' else: rg =", "# # 1 2 bbb 00002 NaN # # 2 10 ccc 00010", "1 aaa 00001 00001_aaa 1.00 AAA # # 1 2 bbb 00002 00002_bbb", "# x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14 # # # #", "= df.groupby(mdc,axis=1) #꼭 axis = 1을주어야 한다. print(gbc.sum()) # col_g1 col_g2 # r1", "컬럼 생성####################################### #이용함수 apply import pandas as pd import numpy as np from", "컬럼과 인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4", "'dict'> print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic", "밸류로 나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분. 
df =", "100 eee 00100 # # 5 200 fff 00200 # # # #format():앞자리의", "# # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원 형태", "col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1) #꼭 axis = 1을주어야", "# r3 row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은", "5 200 fff 00200 # # # #format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다.", "# # # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로 채워라", "col_g2 # r1 1 9 # r2 11 24 # r3 21 39", "00020 00020_ddd 20.00 DDD 20.00:DDD # # 4 100 eee 00100 00100_eee 100.00", "컬럼을 기준으로 자리수를 맞춰주기 위해 5자리로 통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬", "통일(부족한 자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) #", "= df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df) # #", "# # print(\"{:,}\".format(x)) # # # 7,777,777,777 # # # # x=0.25 #", "4 100 eee 00100 00100_eee 100.00 # # 5 200 fff 00200 00200_fff", "1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) # print(df) # # id", "19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4", "x: '_'.join(x),axis=1) # print(df) # # id name id_2 id_name # # 0", "# # 3 20 ddd 00020 00020_ddd 20.00 DDD # # 4 100", "# r2 11 24 # r3 21 39 # r4 31 54 print(type(mdr))", "# 0 1 aaa 00001 00001_aaa 1.00 AAA 1.00:AAA # # 1 2", ": 데이터를 넣고 컬럼과 인덱스로 자료를 구분. 
df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1", "= df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 0 1", "[1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여 새로운 컬럼을 생성 #", "= c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum()) :", "4 100 eee 00100 NaN # # 5 200 fff 00200 NaN #", "5 200 fff 00200 00200_fff # # # #id를 소숫점 이하로 나타내는 새로운", "print(mdr) # {'r1': 'row_g1', 'r2': 'row_g1', 'r3': 'row_g2', 'r4': 'row_g2'} # dic ->", "# 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가 x에 전달) def rgf(x)", "id_name # # 0 1 aaa 00001 NaN # # 1 2 bbb", "00020 NaN # # 4 100 eee 00100 NaN # # 5 200", "DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) #", "20.00:DDD # # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE # #", "# # 1 2 bbb 00002 00002_bbb # # 2 10 ccc 00010", "계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr", "r2 row_g1 # r3 row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum()) #", "# # 25.00% # # # # #name + id_2 :재구성 => 두개의", "c1 c2 c3 c4 c5 # row_g1 2.5 3.5 4.5 5.5 6.5 #", "rg = 'row_g1' else: rg = 'row_g2' return rg # 딕셔너리나 시리즈의 모습에", "print(df) # # id name id_2 id_name id_3 name_3 # # 0 1", "x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비 이상은 무시=>원", "00100 # # 5 200 fff 00200 # # # #format():앞자리의 형식으로 ()안의", "5 200 fff 00200 00200_fff # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #", ":재구성 => 두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x:", "7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777 # # # # x=0.25", "작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas as pd import numpy as", "0 1 aaa 00001 NaN # # 1 2 bbb 00002 NaN #", "print(msr) # r1 row_g1 # r2 row_g1 # r3 row_g2 # r4 row_g2", "c5 # row_g1 5 7 9 11 
13 # row_g2 25 27 29", "NaN # # 3 20 ddd 00020 NaN # # 4 100 eee", "# r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로", "# r2 5 6 7 8 9 # r3 10 11 12 13", "apply import pandas as pd import numpy as np from pandas import DataFrame,", "# 5 200 fff 00200 # # # #format():앞자리의 형식으로 ()안의 인자의 모양을", "as pd import numpy as np from pandas import DataFrame, Series # df", "DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5 # r1 0 1 2", "2 3 4 # row_g2 5 6 7 8 9 # row_g3 10", "name id_2 id_name id_3 # # 0 1 aaa 00001 00001_aaa 1.00 #", "열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis = 1을주어야 한다. #", "20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # # 4 100 eee 00100", "= df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 5 7", "name id_2 id_name # # 0 1 aaa 00001 00001_aaa # # 1", "# # id name id_2 id_name # # 0 1 aaa 00001 00001_aaa", "# # 2 10 ccc 00010 00010_ccc 10.00 CCC # # 3 20", "# 5 200 fff 00200 00200_fff # # # #id를 소숫점 이하로 나타내는", "# # # # x=3.141592 # # print(\"{:.2f}\".format(x)) # # # 3.14 #", "# print(df) # # id name id_2 id_name id_3 name_3 # # 0", "이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # #", "# # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # # # -3.14 # # #", "aaa 00001 NaN # # 1 2 bbb 00002 NaN # # 2", "r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2", "25.00% # # # # #name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이", "Series를 이용한 그룹화 msr = Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1", "id_2 # # 0 1 aaa 00001 # # 1 2 bbb 00002", "# # 5 200 fff 00200 00200_fff # # # #id를 소숫점 이하로", ": 열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis", "# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) # #", "# # 1 2 bbb 00002 00002_bbb 2.00 BBB # # 2 10", "# # 3 20 ddd 00020 00020_ddd 20.00 # # 4 100 eee", "0 1 aaa 00001 00001_aaa 1.00 AAA # # 1 2 
bbb 00002", "= df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id name id_2 id_name #", "# # # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) #", "id_name id_3 # # 0 1 aaa 00001 00001_aaa 1.00 # # 1", "3 # # # # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서", "유지) # # # 7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777 #", "# col_g1 col_g2 # r1 1 9 # r2 11 24 # r3", "CCC 10.00:CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD #", "# df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # # id name id_2 # # 0", "3.15 # # # # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2,", "FFF 200.00:FFF # ################################################################################################################### #groupby 집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로 만들어서", "집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄", "너비가 2, 0으로 채워라 ) # # # 05 # # # #", "print(df) # # id name id_2 id_name id_3 name_3 id_name_3 # # 0", "200 fff 00200 00200_fff # # # #id를 소숫점 이하로 나타내는 새로운 열을", "2 10 ccc 00010 # # 3 20 ddd 00020 # # 4", "# id name id_2 id_name # # 0 1 aaa 00001 00001_aaa #", "# x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00% # # # #", "0 1 aaa 00001 00001_aaa # # 1 2 bbb 00002 00002_bbb #", "24 # r3 21 39 # r4 31 54 #함수를 이용한 그룹화 #", "결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지", "00010 NaN # # 3 20 ddd 00020 NaN # # 4 100", "1 2 bbb 00002 00002_bbb # # 2 10 ccc 00010 00010_ccc #", "2 bbb 00002 00002_bbb 2.00 # # 2 10 ccc 00010 00010_ccc 10.00", "# # id_name_3 => 1.00:AAA # # df['id_name_3'] = df[['id_3','name_3']].apply(lambda x: ':'.join(x),axis=1) #", "ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # # 4 100 eee 00100 00100_eee", "c1 c2 c3 c4 c5 # r1 0 1 2 3 4 #", "r4 15 16 17 18 19 # row_g1 = r1+r2 : 행단위 계산으로", "1.딕셔너리를 이용해서 그룹화 #위.. 
딕셔너리로 만들어서 열을 키로 자료를 밸류로 나타냄 # data=", "13 # row_g2 25 27 29 31 33 print(gbr.mean()) # c1 c2 c3", "3.535534 3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1", "11 13 # row_g2 25 27 29 31 33 msc = Series(mdc) print(df.groupby(msc,axis=1).sum())", "df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을 변경하여", "00002_bbb 2.00 BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC #", "54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는 rgf로 그룹화(df에 대한 정보가", "등의 작업으로 새로운 컬럼 생성####################################### #이용함수 apply import pandas as pd import numpy", "id_name id_3 name_3 id_name_3 # # 0 1 aaa 00001 00001_aaa 1.00 AAA", "r2 11 24 # r3 21 39 # r4 31 54 #함수를 이용한", "pandas import DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) #", "fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정", "EEE 100.00:EEE # # 5 200 fff 00200 00200_fff 200.00 FFF 200.00:FFF #", "from pandas import DataFrame, Series # df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']})", "3.535534 3.535534 3.535534 # row_g2 3.535534 3.535534 3.535534 3.535534 3.535534 # col_g1 =", "c5 # row_g1 0 1 2 3 4 # row_g2 5 6 7", "10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # # 3 20 ddd 00020", "# 4 100 eee 00100 00100_eee 100.00 # # 5 200 fff 00200", "7 8 9 # r3 10 11 12 13 14 # r4 15", "11 24 # r3 21 39 # r4 31 54 print(type(mdr)) # <class", "1을주어야 한다. print(gbc.sum()) # col_g1 col_g2 # r1 1 9 # r2 11", "<class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1 # r3 row_g2 #", "# print(df) # # id name id_2 id_name # # 0 1 aaa", "꼭 axis = 1을주어야 한다. 
# col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc", "10 11 12 13 14 # row_g4 15 16 17 18 19 mdr", "'r2': rg = 'row_g1' else: rg = 'row_g2' return rg # 딕셔너리나 시리즈의", ",너비 이상은 무시=>원 형태 유지) # # # 7777777777 # # print(\"{:,}\".format(x)) #", "1 2 3 4 # row_g2 5 6 7 8 9 # row_g3", "# 2 10 ccc 00010 00010_ccc # # 3 20 ddd 00020 00020_ddd", "id_3 # # 0 1 aaa 00001 00001_aaa 1.00 # # 1 2", "# # 5 200 fff 00200 # # # #format():앞자리의 형식으로 ()안의 인자의", "행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2 = r3+r4", "# # 3 # # # # x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수", "2.00 # # 2 10 ccc 00010 00010_ccc 10.00 # # 3 20", "19 # row_g1 = r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의 성분이", "# df = pd.DataFrame({'id' : [1,2,10,20,100,200], # \"name\":['aaa','bbb','ccc','ddd','eee','fff']}) # print(df) # # #컬럼을", "10.00 CCC 10.00:CCC # # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD", "100 eee 00100 00100_eee 100.00 # # 5 200 fff 00200 00200_fff 200.00", "col_g1 = c1+c2 : 열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum())", "# # # # x=5 # # print(\"{:0>2d}\".format(x)) # 0>2D(D: 너비가 2, 0으로", "aaa 00001 00001_aaa 1.00 AAA # # 1 2 bbb 00002 00002_bbb 2.00", "# row_g1 = r1+r2 : 행단위 계산으로 새로운 행 생성(같은 열의 성분이 더해진다.:", "39 # r4 31 54 #함수를 이용한 그룹화 # 딕셔너리나 시리즈 대신 선언되는", "13 14 # r4 15 16 17 18 19 # row_g1 = r1+r2", "print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 0 1 2 3", "13 14 # row_g4 15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr", "c1 c2 c3 c4 c5 # row_g1 0 1 2 3 4 #", "# 1 2 bbb 00002 00002_bbb # # 2 10 ccc 00010 00010_ccc", "# # # # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로", "10 ccc 00010 00010_ccc 10.00 # # 3 20 ddd 00020 00020_ddd 20.00", "df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df) #", "3 4 # row_g2 5 6 7 8 9 # row_g3 10 11", "# # # id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA # # df['id_name_3']", "00020_ddd 20.00 DDD 20.00:DDD # # 4 100 eee 
00100 00100_eee 100.00 EEE", "row_g1 2.5 3.5 4.5 5.5 6.5 # row_g2 12.5 13.5 14.5 15.5 16.5", "# df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) # print(df) # # id name id_2", "새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) # # id name", "열단위 계산으로 새로운 열 생성(같은 행의 성분이 더해진다.: sum()) : 꼭 axis =", "= Series(mdr) print(type(msr)) # <class 'pandas.core.series.Series'> print(msr) # r1 row_g1 # r2 row_g1", "# # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x),axis=1) #축을 1로 지정 # print(df) #", "소숫점 이하로 나타내는 새로운 열을 추가 # df['id_3']=df['id'].apply(lambda x: \"{:.2f}\".format(x)) # print(df) #", "11 12 13 14 # r4 15 16 17 18 19 # row_g1", "row_g4 15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum())", "10 ccc 00010 NaN # # 3 20 ddd 00020 NaN # #", "Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9 # r2 11 24", "14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3 c4 c5 # row_g1 3.535534", "200.00 FFF # # # # id_name_3 컬럼추가 # # id_name_3 => 1.00:AAA", "# row_g4 15 16 17 18 19 mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr)", "print(\"{:+.2f}\".format(x)) # # # +3.14 # # # # x=-3.141592 # # print(\"{:+.2f}\".format(x))", "두개의 컬럼이 결합(apply대상이 2개의 컬럼이 됨) # df['id_name'] = df[['id_2','name']].apply(lambda x: '_'.join(x)) #축을", "00010_ccc 10.00 # # 3 20 ddd 00020 00020_ddd 20.00 # # 4", "# r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1", "id name id_2 id_name id_3 name_3 # # 0 1 aaa 00001 00001_aaa", "gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5 # row_g1 0", "7 9 11 13 # row_g2 25 27 29 31 33 print(gbr.mean()) #", "= DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5 # r1 0 1", "모양을 바꿔준다. 
# # # # x=3.141592 # # print(\"{:.2f}\".format(x)) # # #", "NaN # # 5 200 fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda", "2 bbb 00002 # # 2 10 ccc 00010 # # 3 20", "변경하여 새로운 컬럼을 생성 # #새로운 id 컬럼을 원래의 id 컬럼을 기준으로 자리수를", "1.00 # # 1 2 bbb 00002 00002_bbb 2.00 # # 2 10", "3.5 4.5 5.5 6.5 # row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) #", "0 1 aaa 00001 00001_aaa 1.00 # # 1 2 bbb 00002 00002_bbb", "x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # # # 3.15", "00002 00002_bbb 2.00 BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC", "형태 유지) # # # 7777777777 # # print(\"{:,}\".format(x)) # # # 7,777,777,777", "print(df) # # id name id_2 id_name # # 0 1 aaa 00001", "# # 0 1 aaa 00001 00001_aaa # # 1 2 bbb 00002", "return rg # 딕셔너리나 시리즈의 모습에 맞춰서 그룹화 하여 그룹화 계산 함수의 결과에", "r3 row_g2 # r4 row_g2 # dtype: object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과", "자릿수는 앞자리에 0을 채워 넣는다.)하며 만듬 # df['id_2']=df['id'].apply(lambda x:\"{:0>5d}\".format(x)) # print(df) # #", "ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # # 3 20 ddd 00020 00020_ddd", "'_'.join(x),axis=1) # print(df) # # id name id_2 id_name # # 0 1", "x=0.25 # # print(\"{:.2%}\".format(x)) # # # 25.00% # # # # #name", "00100_eee 100.00 # # 5 200 fff 00200 00200_fff 200.00 # # df['name_3']=df['name'].apply(lambda", "2.00 BBB 2.00:BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC", "ccc 00010 00010_ccc 10.00 CCC # # 3 20 ddd 00020 00020_ddd 20.00", "2 10 ccc 00010 00010_ccc 10.00 CCC 10.00:CCC # # 3 20 ddd", "mdr = {'r1':'row_g1','r2':'row_g1','r3':'row_g2','r4':'row_g2'} gbr = df.groupby(mdr) print(gbr.sum()) # c1 c2 c3 c4 c5", "# # 5 200 fff 00200 00200_fff 200.00 FFF # # # #", "# # +3.14 # # # # x=-3.141592 # # print(\"{:+.2f}\".format(x)) # #", "# c1 c2 c3 c4 c5 # row_g1 3.535534 3.535534 3.535534 3.535534 3.535534", "# c1 c2 c3 c4 c5 # row_g1 0 1 2 3 4", "200.00:FFF # ################################################################################################################### #groupby 
집계함수 # 1.딕셔너리를 이용해서 그룹화 #위.. 딕셔너리로 만들어서 열을", "# # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D: 너비가 5, 0으로 채워라 ,너비", "20 ddd 00020 00020_ddd 20.00 DDD # # 4 100 eee 00100 00100_eee", "구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) # c1 c2 c3 c4 c5 # r1", "0>2D(D: 너비가 2, 0으로 채워라 ) # # # 05 # # #", "aaa 00001 00001_aaa # # 1 2 bbb 00002 00002_bbb # # 2", "채워라 ,너비 이상은 무시=>원 형태 유지) # # # 7777777777 # # print(\"{:,}\".format(x))", "31 33 print(gbr.mean()) # c1 c2 c3 c4 c5 # row_g1 2.5 3.5", "# dic -> Series # Series를 이용한 그룹화 msr = Series(mdr) print(type(msr)) #", "# # print(\"{:+.2f}\".format(x)) # # # -3.14 # # # # x=2.718 #", "새로운 행 생성(같은 열의 성분이 더해진다.: sum()) # row_g2 = r3+r4 mdr =", "print(gbc.sum()) # col_g1 col_g2 # r1 1 9 # r2 11 24 #", "# # # 3.14 # # # # print(\"{:+.2f}\".format(x)) # # # +3.14", "만들어서 열을 키로 자료를 밸류로 나타냄 # data= : 데이터를 넣고 컬럼과 인덱스로", "# # 3 20 ddd 00020 NaN # # 4 100 eee 00100", "c2 c3 c4 c5 # r1 0 1 2 3 4 # r2", "# # # # #name + id_2 :재구성 => 두개의 컬럼이 결합(apply대상이 2개의", "r1 row_g1 # r2 row_g1 # r3 row_g2 # r4 row_g2 # dtype:", "= 'row_g1' else: rg = 'row_g2' return rg # 딕셔너리나 시리즈의 모습에 맞춰서", "2.00 BBB # # 2 10 ccc 00010 00010_ccc 10.00 CCC # #", "# 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE # # 5 200", "data= : 데이터를 넣고 컬럼과 인덱스로 자료를 구분. df = DataFrame(data=np.arange(20).reshape(4,5),columns=['c1','c2','c3','c4','c5'],index=['r1','r2','r3','r4']) print(df) #", "= 1을주어야 한다. # col_g2 = c3+c4+c5 mdc = {'c1':'col_g1','c2':'col_g1','c3':'col_g2','c4':'col_g2','c5':'col_g2'} gbc = df.groupby(mdc,axis=1)", "00001_aaa # # 1 2 bbb 00002 00002_bbb # # 2 10 ccc", "# # 5 200 fff 00200 NaN # # df['id_name'] = df[['id_2','name']].apply(lambda x:", "== 'r1' or x == 'r2': rg = 'row_g1' else: rg = 'row_g2'", "00100 00100_eee # # 5 200 fff 00200 00200_fff # # df['id_name'] =", "20 ddd 00020 # # 4 100 eee 00100 # # 5 200", "#format():앞자리의 형식으로 ()안의 인자의 모양을 바꿔준다. 
# # # # x=3.141592 # #", "10 ccc 00010 00010_ccc # # 3 20 ddd 00020 00020_ddd # #", "00002_bbb # # 2 10 ccc 00010 00010_ccc # # 3 20 ddd", "1.00 AAA # # 1 2 bbb 00002 00002_bbb 2.00 BBB # #", "# x=3.147592 # # print(\"{:.2f}\".format(x)) # .2f(소수 점 셋째자리에서 반올림) # # #", "# r1 row_g1 # r2 row_g1 # r3 row_g2 # r4 row_g2 #", "x: '_'.join(x)) #축을 지정 하지 않으면 안됨 # print(df) # # id name", "object print(df.groupby(msr).sum()) # 딕셔너리와 같은 결과 # c1 c2 c3 c4 c5 #", "-3.14 # # # # x=2.718 # # print(\"{:.0f}\".format(x)) # 정수를 출력하라(소수 점", "print(df) # c1 c2 c3 c4 c5 # r1 0 1 2 3", "msc = Series(mdc) print(df.groupby(msc,axis=1).sum()) # col_g1 col_g2 # r1 1 9 # r2", "DDD 20.00:DDD # # 4 100 eee 00100 00100_eee 100.00 EEE 100.00:EEE #", "row_g2 12.5 13.5 14.5 15.5 16.5 print(gbr.std()) # c1 c2 c3 c4 c5", "16.5 print(gbr.std()) # c1 c2 c3 c4 c5 # row_g1 3.535534 3.535534 3.535534", "# # 3 20 ddd 00020 00020_ddd 20.00 DDD 20.00:DDD # # 4", "# # 05 # # # # x=7777777777 # # print(\"{:0>5d}\".format(x)) # 0>2D(D:" ]
[ "i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = []", "w, h): half_w = im.size[0] / 2 half_h = im.size[1] / 2 return", "Unless required by applicable law or agreed to in writing, software # distributed", "define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in each", "\"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average", "1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True", "'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix':", "keys = [ 'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached)", "from __future__ import division from __future__ import print_function import click import yaml from", "' 'evaluate the VGG and ResNet architectures which do not use a background", "'The number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of", ":] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1 -", "'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions)", "offset for the labels in the dataset. This flag is primarily used to", "evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing to use. 
If left", "False # convert_model = True if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias':", "128] if overlap.mean() + overlap.std() > 128: color = np.array([0, 0, 255]).astype(float) /", "return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir,", "we make a single pass over all of the data. num_batches = math.ceil(dataset.num_samples", "images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config,", "plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None,", "http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for", "True if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias':", "def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir)", "file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not None", "= {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i", "aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated =", "@click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f: 
config = yaml.load(f)", "aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k, v", "central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1] return central_crop(im, w * central_fraction,", "variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions =", "resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio *", "dataset. This flag is primarily used to ' 'evaluate the VGG and ResNet", "tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record", "provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size,", "1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define", "] for filename, label in filenames: filename = dataset_dir_file(config, filename) # image_np =", "min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas =", "checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel'", "prefix) if labels is not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3)", "else: if k not in aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value)", ":] w, h = image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency, (l,", "= image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency =", 
"shape [None, 299, 299, 3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='')", "tensorflow.python.training import monitored_session from datasets.plants import read_label_file from datasets import dataset_factory from nets", "cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f:", "'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The", "variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities,", "dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path", "float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir)", "image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\":", "= get_dataset_dir(config) examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir,", "index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3", "'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for", "mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) #", "np.copy(1 - alpha) * 255 overlap = roi_img[paint > 128] if overlap.mean() +", "preprocessing to use. 
If left ' 'as `None`, then the model_name flag is", "predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for", "size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return", "config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f:", "for k, v in info.items() if isinstance(v, tf.Tensor) and (not keys or k", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir)", "in the dataset. This flag is primarily used to ' 'evaluate the VGG", "examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir,", "'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size))", "slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) #", "import dataset_factory from nets import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import", "'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels", "{ 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': 
-_B_MEAN, }, 'mobilenet_v1': { 'red_bias':", "v in info.items(): f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import", "255 # blue else: color = np.array([255, 200, 0]).astype(float) / 255 # orange", "filename, label in filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits =", "as sns set_matplot_zh_font() # ax = plt.subplot() fig, ax = plt.subplots() # the", "range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr)", "指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x)", "tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore =", "half_w + w / 2, half_h + h / 2)) def pre_process_resnet(im, coreml=False):", "* (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img", "15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap =", "-= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1]", "def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None):", "decay to use for the moving average.' 
'If left as None, then moving", "= [] for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example)", "label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) #", "{ 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name]", "filenames: filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb( config,", "Compute macro-average ROC curve and ROC area # First aggregate all false positive", "(l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w", "return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) #", "pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1)", "@click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _roc_analysis(config,", "10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path =", "label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import", "= roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes =", "= None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area =", "saliency_maps(config_file, use_cached): with open(config_file) as 
f: config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file')", "coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'),", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn", "# TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures", "def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities',", "200, 0]).astype(float) / 255 # orange paint[:, :] *= color roi_img = cv2.multiply(alpha,", "h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size)", "dims to shape [None, 299, 299, 3] image_np = np.expand_dims(image_np, 0) graph =", "roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img return canvas def test_frozen_graph_saliency_map(config):", "return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config):", "4, 'The number of threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants',", "batches to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of", "if k == 'confusion_matrix': if k not in aggregated: aggregated[k] = np.matrix(value) else:", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "numpy as np import tensorflow as tf from tensorflow.python.training import monitored_session from datasets.plants", 
"yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f: config =", "[0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive", "collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if", "'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold,", "l.\"\"\" return [l[i:i + n] for i in range(0, len(l), n)] def save_saliency_maps(config,", "output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name)", "= tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values,", "if k not in aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else:", "] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter all_labels = aggregated['labels']", "eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 *", "get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def", "j in range(n): image = images[j] grad_img = grad_imgs[j] label = labels[j] label_name", "*= 2.0 return arr def pre_process(config, im, coreml=False): model_name = get_model_name(config) return {", "/ 2)) def 
pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2", "x /= std x *= target_std x *= 255 x = np.clip(x, 0,", "in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for", "= result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top", "def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np =", "# 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[", "value in names_to_values.items(): summary_name = 'eval/%s' % name op = tf.summary.scalar(summary_name, value, collections=[])", "= {1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"],", "= info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config)", "fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考", "0, 255]).astype(float) / 255 # blue else: color = np.array([255, 200, 0]).astype(float) /", "filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename,", "np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1) paint = np.copy(1 -", "logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs']", "- fpr[key_series]) 
optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr)", "ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr", "coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def", "= grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10: continue", "\"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor =", "sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1))", "roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability' i_optimal_micro =", "裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size),", "OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100", "architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing to use.", "roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and", "in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute", "import subprocess import PIL import math import os from PIL import 
Image import", "# Compute micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve(", "kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k, v in kv_pairs: print(k, v)", "pass for j in range(n): image = images[j] grad_img = grad_imgs[j] label =", "import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix =", "[ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs']", "logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def", "save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w')", "get_dataset_dir(config) examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i))", "in filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config,", "alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1) paint =", "as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file,", "= coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True if convert_model: extra_args =", "filename, label in filenames: filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result", "assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name", "= slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 
'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix:", "for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params = { k:", "use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities',", "# Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k(", "(dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a", "2 n_classes = len(possible_labels) # Compute macro-average ROC curve and ROC area #", "plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def", "= \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) #", "num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print the summaries to screen. 
for", "model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes,", ") return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font()", "import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path", "labels is not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image =", "with h5py.File(info_file_path, 'r') as f: return { k: f[k][:] for k in f.keys()", "'') == '': print('no display found. Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot", "record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator: example = tf.train.Example()", "labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i]", "shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name)", "saliency, ], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file,", "names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), })", "w = im.size[0] h = im.size[1] return central_crop(im, w * central_fraction, h *", "= np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i in", "+ 1)) y_binary = label_binarize(labels, classes=possible_labels) 
output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix", "import math import os from PIL import Image import cv2 import numpy as", "h / 2, half_w + w / 2, half_h + h / 2))", "= np.array([0, 0, 255]).astype(float) / 255 # blue else: color = np.array([255, 200,", "one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path':", "roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC area fpr[\"micro\"],", "tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples =", "grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, #", "not use this file except in compliance with the License. 
# You may", "= roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel =", "Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN = 123.68", "target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l in im.size) return", "1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction):", "os.makedirs(save_dir) except OSError: pass for j in range(n): image = images[j] grad_img =", "use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is", "import traceback traceback.print_exc() raise for k in res.keys(): value = res[k] if k", "'The address of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory", "result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir =", "input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map = {", "1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc", "for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w =", "slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions", "= len(possible_labels) # Compute macro-average ROC curve and ROC area # First aggregate", "tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = 
tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names,", "i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i],", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] =", "1)[0] for f in output.split('\\n')) available = list(mat_fonts & zh_fonts) return available def", "if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config)", "language governing permissions and # limitations under the License. # ============================================================================== \"\"\"Generic evaluation", "model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for the", "/ 2 half_h = im.size[1] / 2 return im.crop( (half_w - w /", "# confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma')", "examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename))", "cv2 import numpy as np import tensorflow as tf from tensorflow.python.training import monitored_session", "= plt.subplots() # the size of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black #", "}) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None #", "coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ 
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),", "agreed to in writing, software # distributed under the License is distributed on", "confusion_matrix(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) keys = [ 'confusion_matrix',", "else: confusion_matrix = None # Print the summaries to screen. for name, value", "-= 0.5 arr *= 2.0 return arr def pre_process(config, im, coreml=False): model_name =", "x.std() if std: x /= std x *= target_std x *= 255 x", "True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path =", "'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor:", "index = np.argmax(logits) print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name)", "import monitored_session from datasets.plants import read_label_file from datasets import dataset_factory from nets import", "l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w -", "= np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally", "= 'ROC curve of class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label =", "= tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or", "get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False", "pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0],", "# the size of A4 paper fig.set_size_inches(18, 15) # 
https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix =", "plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached',", "half_h - h / 2, half_w + w / 2, half_h + h", "labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits =", "= canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor = 3 alpha = np.clip(1", "y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr = {}", "run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name", "= True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False,", "= aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False):", "IOError: return None def chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return", "info.items(): f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path", "- w / 2, half_h - h / 2, half_w + w /", "tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions", "y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels) # Compute", "_B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 
'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags():", "label in filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml(", ":, 0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :, 2] -=", "index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits',", "open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with", "intensify_factor = 3 alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0,", "= get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or", "rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show()", "image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np", "np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency,", "{} for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ =", "return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size", "input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch size is 1", "def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): #", "f: config = yaml.load(f) _run_saliency_maps(config, 
use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached):", "i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown')", "use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ] aggregated =", "to in writing, software # distributed under the License is distributed on an", "# Define the model # #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages", "np.array([0, 0, 255]).astype(float) / 255 # blue else: color = np.array([255, 200, 0]).astype(float)", "np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr -= 0.5 arr *= 2.0", "implied. # See the License for the specific language governing permissions and #", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np,", "for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path)", "= get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f:", "from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '')", "config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name =", "= x.std() if std: x /= std x *= target_std x *= 255", "labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': 
names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images,", "return aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated", "= get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np", "tf.app.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path',", "# cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') #", "import os from PIL import Image import cv2 import numpy as np import", "f = h5py.File(info_file_path, 'w') for k, v in info.items(): f[k] = v f.close()", "= get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix',", "in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr =", "= all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro'", "print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ],", "coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size =", "import h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k, v", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "traceback.print_exc() raise for k in res.keys(): value = res[k] if k == 'confusion_matrix':", "'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = 
aggregated['grad_imgs'] images = aggregated['images']", "checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = []", "data from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次", "label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 + 0.5) *", "FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step", "= [labels_to_names[i] if labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis,", "then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS", "file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads',", "roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve", "i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params = { k: v", "config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with", "'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config)", "if calculate_confusion_matrix: 
confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print", "use_cached): with open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached',", "save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax = plt.subplot() fig, ax =", "saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ],", "is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) keys =", "print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file')", "output tensor tensor_map = { 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name(", "range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "'An offset for the labels in the dataset. 
This flag is primarily used", "pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path)", "'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import", "batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model", "prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names']))", "@cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f: config =", "slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics: names_to_values,", "linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels,", "arr[:, :, 0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :, 2]", "sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None,", "_run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f:", "-*- # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
# # Licensed", "for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the architecture to", "x = np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table =", "lw = 2 n_classes = len(possible_labels) # Compute macro-average ROC curve and ROC", "255 x = np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table", "train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset files are stored.')", "tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name =", "confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') #", "the data. num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating", "def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' %", "image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key):", "import cycle import subprocess import PIL import math import os from PIL import", "feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for", "fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:',", "print('logits', logits) index = np.argmax(logits) 
print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction", "TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model was", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "= PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name", "you may not use this file except in compliance with the License. #", "run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config)", "'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr", "save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError:", "os from PIL import Image import cv2 import numpy as np import tensorflow", "aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k not in", "in kv_pairs: print(k, v) def save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory,", "test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config,", "('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = os.path.join(dataset_dir, filename) image_np", "'The directory where the model was written to or an absolute path to", "_eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions 
= info['predictions'] probabilities", "prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label", "lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve')", "def deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x) x = np.max(x, axis=2)", "plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show()", "shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels',", "# Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for", "== '': print('no display found. Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as", "except OSError: pass for j in range(n): image = images[j] grad_img = grad_imgs[j]", "shell=True) zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n')) available = list(mat_fonts &", "-_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 /", "example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False):", "filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np,", "'The number of threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The", "-= x.mean() std = x.std() if std: x /= std x *= target_std", "network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider that", 
"num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the model # #################### logits,", "/ 2, half_h - h / 2, half_w + w / 2, half_h", "a background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name", "the labels in the dataset. This flag is primarily used to ' 'evaluate", "fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold)", "= yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as", "mask[confusion_matrix == 0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "n_classes = len(possible_labels) # Compute macro-average ROC curve and ROC area # First", "if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return {", "for i in range(0, len(l), n)] def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n", "'.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return", "# cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # sns.set(font_scale=1) with", "n)] def save_saliency_maps(config, grad_imgs, 
images, prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps'", "def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in", "mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])", "224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml: arr", "def plot_roc(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command()", "= 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', }", ":lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n')) available", "zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n')) available = list(mat_fonts & zh_fonts)", "set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts", "test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f: config", "return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)", "output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model", "tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation',", "overlap.mean() + overlap.std() > 128: color 
= np.array([0, 0, 255]).astype(float) / 255 #", "keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated", "= image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True,", "use. If left ' 'as `None`, then the model_name flag is used.') tf.app.flags.DEFINE_float(", "np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i]", "it and compute AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr", "image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1)", "def test_models(config_file): with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached',", "the License. # ============================================================================== \"\"\"Generic evaluation script that evaluates a model using a", "' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')", "'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator =", "image_np.shape[0], image_np.shape[1], 3]} # batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\"", "'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from", "License. 
# ============================================================================== \"\"\"Generic evaluation script that evaluates a model using a given", "'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path =", "sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n", "'/tmp/tfmodel/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number", "__future__ import division from __future__ import print_function import click import yaml from collections", "or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of", "{}'.format(i, num_batches)) params = { k: v for k, v in info.items() if", "checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True)", "in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config,", "AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"],", "images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels',", "get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step", "k in f.keys() } except IOError: return None def chunks(l, n): \"\"\"Yield successive", "255 # orange paint[:, :] *= color roi_img = cv2.multiply(alpha, 
roi_img.astype(float)) roi_img =", "= cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes), colors): label", "2.0 return arr def pre_process(config, im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50':", "if labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path", "prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 + 0.5)", "l, h_offset:h_offset + l] intensify_factor = 3 alpha = np.clip(1 - intensify_factor *", "###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model #", "restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities,", "coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs =", "yaml.load(f) keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path =", "prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None,", "pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read())", "'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, }", "im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = 
np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr", "print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager()", "the model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name,", "output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f", "prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name =", "1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for", "= 'eval/%s' % name op = tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value],", "% checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np,", "k == 'confusion_matrix': if k not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k]", "image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency,", "os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size", "image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1],", "'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions':", "name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name", "keys) } 
try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except: import traceback", "= FontManager() mat_fonts = set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw", "= math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path)", "written to or an absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir',", "print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory, file_name) try: with", "= os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path", "predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def", "224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims", "= np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at", "logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]]", "'evaluate the VGG and ResNet architectures which do not use a background '", "'': print('no display found. 
Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt", "linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False", "save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names =", "prediction_name) print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def", "= load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return aggregated calculate_confusion_matrix = True", "'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits']", "useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs", "sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] #", "dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir)", "filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb'", "from datasets.plants import read_label_file from datasets import dataset_factory from nets import nets_factory from", "[ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in", "fig, ax = plt.subplots() # the size of A4 paper fig.set_size_inches(18, 15) #", "2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2 =", "i, 
roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC", "coreml: arr[:, :, 0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :,", "open(config_file) as f: config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def", "the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset files are", "used to ' 'evaluate the VGG and ResNet architectures which do not use", "and (not keys or k in keys) } try: feed_dict = {} res", "all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i,", "as f: config = yaml.load(f) keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config,", "'plants', 'The name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name", "# 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter", "def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l =", "from __future__ import absolute_import from __future__ import division from __future__ import print_function import", "flag is primarily used to ' 'evaluate the VGG and ResNet architectures which", "# tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset # ######################", "= tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name", "feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k in res.keys(): value = res[k]", 
"is_training=False) ############################################################## # Create a dataset provider that loads data from the dataset", "evaluation script that evaluates a model using a given dataset.\"\"\" from __future__ import", "pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction", "= [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize tensor", "list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits':", "> 128: color = np.array([0, 0, 255]).astype(float) / 255 # blue else: color", "_ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel", "1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np,", "pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config,", "tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config,", "coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return", "= list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available)", "plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94", "tf.app.flags.DEFINE_string( 'model_name', 
'mobilenet_v1', 'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None,", "to ' 'evaluate the VGG and ResNet architectures which do not use a", ":] w_offset = int((w - l) / 2) h_offset = int((h - l)", "import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '') == '':", "create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset to load.')", "x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns,", "yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as", "Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path)", "return { k: f[k][:] for k in f.keys() } except IOError: return None", "target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr", "in filenames: filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb(", "saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :]", "image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5", "ax = plt.subplots() # the size of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black", "axis=2) x -= x.mean() std = x.std() if std: x /= std x", "i in range(0, len(l), n)] def save_saliency_maps(config, 
grad_imgs, images, prefix='', labels=None): n =", "not coreml: arr[:, :, 0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:,", "1 for row in image_table: for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col)", "299, 3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as", "= nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider that loads", "2 return im.crop( (half_w - w / 2, half_h - h / 2,", "dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the", "'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold", ":, 2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h", "def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config )", "in list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names,", "tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\"", "0]).astype(float) / 255 # orange paint[:, :] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float))", "list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix", "logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = 
tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore =", "set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体", "labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list,", "tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for the moving average.' 'If left", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config):", "None # Print the summaries to screen. for name, value in names_to_values.items(): summary_name", "y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC area", "slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels,", "= False # convert_model = True if convert_model: extra_args = { 'resnet_v2_50': {", "v in kv_pairs: print(k, v) def save_var(directory, file_name, info): import h5py info_file_path =", "predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize", "target_std x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list,", "image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1 for", "get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys,", "get_config_value(config, 
'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return", "tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model was written to or an", "def central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1] return central_crop(im, w *", "> 128] if overlap.mean() + overlap.std() > 128: color = np.array([0, 0, 255]).astype(float)", "enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits':", "FontManager import matplotlib if os.environ.get('DISPLAY', '') == '': print('no display found. Using non-interactive", "np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr = {} roc_auc =", "plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config)", "{ 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias':", "saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w - l)", "import Image import cv2 import numpy as np import tensorflow as tf from", "slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT", "= np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None],", "'num_preprocessing_threads', 4, 'The number of threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name',", "`None`, then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to", "f.keys() 
} except IOError: return None def chunks(l, n): \"\"\"Yield successive n-sized chunks", "from l.\"\"\" return [l[i:i + n] for i in range(0, len(l), n)] def", "get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [", "if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples", "v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory, file_name)", "= get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1", "See the License for the specific language governing permissions and # limitations under", "PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits)", "image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index =", "curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown')", "= [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label", "= yaml.load(f) keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path", "= labels_to_names[label] if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label,", "paper 
fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix)", "參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2", "{} fpr = {} roc_auc = {} for i in range(len(possible_labels)): y_scores =", "PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0] / 2 half_h = im.size[1]", "get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)", "1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "__future__ import print_function import click import yaml from collections import Iterable, defaultdict from", "FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image,", "for k in f.keys() } except IOError: return None def chunks(l, n): \"\"\"Yield", "'%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config,", "'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the", "label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the model #", "slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, 
dataset.num_classes - FLAGS.labels_offset) labels", "loads data from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, #", "from __future__ import print_function import click import yaml from collections import Iterable, defaultdict", "= get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ]", "+= np.matrix(value) else: if k not in aggregated: aggregated[k] = [] if isinstance(value,", "pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f: config", "= { 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result =", "- FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset", "checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch #{}", "Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch size", "import print_function import click import yaml from collections import Iterable, defaultdict from itertools", "def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def", "print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure() 
colors = cycle(['aqua',", "coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image", "kv_pairs: print(k, v) def save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory, file_name)", "'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path,", "output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input", "== 0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True,", "the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the architecture to evaluate.')", "'__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np", "colors): label = 'ROC curve of class {0} (area = {1:0.2f})'.format( i, roc_auc[i])", "np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -= _R_MEAN arr[:, :, 1] -=", "h): half_w = im.size[0] / 2 half_h = im.size[1] / 2 return im.crop(", "0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap =", "], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None):", "tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'],", "else prefix, 
label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 +", "the specific language governing permissions and # limitations under the License. # ==============================================================================", "averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS", "name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the", "coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def", "#################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore", "math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names", "image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l = min(w, h) saliency", "@click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file)", "import click import yaml from collections import Iterable, defaultdict from itertools import cycle", "@click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _run_info(config,", "@click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f: config = yaml.load(f)", "to use. 
If left ' 'as `None`, then the model_name flag is used.')", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size,", "import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary", "# 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size,", "not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k", "config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction label index:\", index)", "https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') #", "def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits =", "classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix,", "n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i =", "/ 255 # orange paint[:, :] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img", "try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise", "Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k, v in kv_pairs: print(k,", "Print the summaries to screen. 
for name, value in names_to_values.items(): summary_name = 'eval/%s'", "aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names =", "FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider", "= auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series]", "label -= FLAGS.labels_offset raw_images = image ##################################### # Select the preprocessing function #", "where the model was written to or an absolute path to a '", "checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images,", "{ 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def", "value = res[k] if k == 'confusion_matrix': if k not in aggregated: aggregated[k]", "images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) ####################", "fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series =", "n_columns, i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot", "_run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size =", "slim.get_or_create_global_step() ###################### # Select the dataset # ###################### 
dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name,", "sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map = { 'logits': output_tensor, } if", "restored_image = ((image / 2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency)", "n = images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try:", "'run_info_result.h5') if aggregated is not None: return aggregated calculate_confusion_matrix = True info =", "= slim.get_or_create_global_step() ###################### # Select the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name,", "or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label],", "open(config_file) as f: config = yaml.load(f) keys = [ 'confusion_matrix', ] aggregated =", "= mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability'", "= tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches", "'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master,", "if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def", "def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as", "return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = 
get_config_value(config,", "colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes), colors):", "'If left as None, then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None,", "files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels in the", "image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached:", "as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def,", "= auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel !=", "return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or", "len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def", "checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0,", "True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax,", "@click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _run_saliency_maps(config,", "or an absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory", "= tpr[key_series][i_optimal_micro] 
optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot", "= yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file)", "intensify_factor * saliency.astype(float) / 255, 0, 1) paint = np.copy(1 - alpha) *", "###################### # Select the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir)", "graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np = pre_process(config, image_np)", "def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from", "n] for i in range(0, len(l), n)] def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None):", "all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path,", "chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i + n] for", "from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 #", "'ROC curve of class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label = None", "return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename)", "not in aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels", "KIND, either 
express or implied. # See the License for the specific language", "= np.copy(1 - alpha) * 255 overlap = roi_img[paint > 128] if overlap.mean()", "row in image_table: for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i +=", "n-sized chunks from l.\"\"\" return [l[i:i + n] for i in range(0, len(l),", "v) def save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory, file_name) f =", "3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name", "batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the model # ####################", "= cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction", "save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys =", "============================================================================== \"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\" from", "print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([", "directory where the model was written to or an absolute path to a", "ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown')", "x.mean() std = x.std() if std: x /= std x *= target_std x", "len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): 
checkpoint_path =", "k in res.keys(): value = res[k] if k == 'confusion_matrix': if k not", "plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes),", "} }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args )", "'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir", "\"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n')) available = list(mat_fonts", "model_name = get_model_name(config) image_size = 224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np,", "ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else i for i in range(n)]", "results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency =", "permissions and # limitations under the License. 
# ============================================================================== \"\"\"Generic evaluation script that", "split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset files are stored.') tf.app.flags.DEFINE_integer(", "getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if", "'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where", "for k in res.keys(): value = res[k] if k == 'confusion_matrix': if k", "prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images',", "number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches", "if labels is not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image", "labels = info['labels'] predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities,", "'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config,", "= \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor", "ANY KIND, either express or implied. 
# See the License for the specific", "= im.size[0] h = im.size[1] return central_crop(im, w * central_fraction, h * central_fraction)", "np.abs(x) x = np.max(x, axis=2) x -= x.mean() std = x.std() if std:", "checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if", "fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw =", "the dataset. This flag is primarily used to ' 'evaluate the VGG and", "x = np.abs(x) x = np.max(x, axis=2) x -= x.mean() std = x.std()", "os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results =", "w, h = image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency, (l, l))", "save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys =", "2, half_h + h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1", "'as `None`, then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay", "= get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config,", "for k, v in kv_pairs: print(k, v) def save_var(directory, file_name, info): import h5py", "# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = os.path.join(dataset_dir, filename)", "of the preprocessing to use. 
If left ' 'as `None`, then the model_name", "= get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name", "import read_label_file from datasets import dataset_factory from nets import nets_factory from preprocessing import", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "for k, v in info.items(): f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory,", "'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels =", "if os.environ.get('DISPLAY', '') == '': print('no display found. Using non-interactive Agg backend') matplotlib.use('Agg')", "'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config,", "import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78", "255.0 arr -= 0.5 arr *= 2.0 return arr def pre_process(config, im, coreml=False):", "to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used to create the batches.')", "keys or k in keys) } try: feed_dict = {} res = sess.run(params,", "not None: return aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches =", "return central_crop(im, w * central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考", "print(len(examples)) examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i))", "plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax = plt.subplot() fig,", "network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 
'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss,", "*= 255 x = np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None):", "collections import Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda", "canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb')", "plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else:", "tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4,", "0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row", "PIL import Image import cv2 import numpy as np import tensorflow as tf", "] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix", "# ============================================================================== \"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\"", "' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the", "axis = [labels_to_names[i] if labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270)", "use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config)", "u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = dataset_dir_file(config,", "auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 
2 n_classes = len(possible_labels) # Compute macro-average ROC curve", "u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = os.path.join(dataset_dir,", "target_std=0.3) restored_image = ((image / 2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image,", "lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4)", "model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the", "- l) / 2) h_offset = int((h - l) / 2) roi_img =", "##################################### # Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name", "* l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h):", "from datasets import dataset_factory from nets import nets_factory from preprocessing import preprocessing_factory from", "weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches,", "1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\")", "use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model was written to or", "All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask,", "with open(config_file) as f: config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True)", "preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size", "common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images =", "= read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j in", "get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def", "value, collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1", "(1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img return", "return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore':", "optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC", "https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) 
target_smallest_size = 224 im2 =", "average.' 'If left as None, then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size',", "in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0]", "file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated =", "= '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not None else", "dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ######################", "= int((w - l) / 2) h_offset = int((h - l) / 2)", "= dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model # #################### num_classes", "= get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### #", "labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),", "tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The", "deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 + 0.5) * 255).astype('uint8') blend =", "labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else i for i in", "blend, image_np, saliency, ], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True)", 
"sorted(dict(c).items(), key=lambda p: p[0]) for k, v in kv_pairs: print(k, v) def save_var(directory,", "aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "# image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index =", "output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:,", "f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path =", "dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir)", "h = im.size[1] return central_crop(im, w * central_fraction, h * central_fraction) def pre_process_mobilenet(im,", "def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default()", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions,", "\"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get", "123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1':", "# get output tensor tensor_map = { 'logits': output_tensor, } if 
enable_saliency_maps: tensor_map['grad_imgs']", "for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it", "all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves", "tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) #", "all of the data. num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or", "= info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as", "return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1] return central_crop(im,", "+= np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /=", "model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg',", "get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def", "128: color = np.array([0, 0, 255]).astype(float) / 255 # blue else: color =", "-1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file,", "the VGG and ResNet architectures which do not use a background ' 'class", "checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities", "applicable law or agreed to in writing, software # distributed under the License", "def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for f", 
"0.5 arr *= 2.0 return arr def pre_process(config, im, coreml=False): model_name = get_model_name(config)", "fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr[\"macro\"]", "the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1,", "ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"]", "mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr", "batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name',", "string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def", "= plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix ==", "Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0])", "ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to evaluate by default", "if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len labels', len(labels)) all_labels", "variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else:", "from nets import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import", "Finally average it and compute AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"]", "background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of", "# -*- coding: utf-8 -*- # Copyright 2016 The TensorFlow Authors. All Rights", "= np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name =", "get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np =", "Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved')", "writing, software # distributed under the License is distributed on an \"AS IS\"", "' 'as `None`, then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The", "_G_MEAN arr[:, :, 2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w =", "os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = 
get_checkpoint_dir_path(config) pb_file_path =", "with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name =", "num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' %", "the moving average.' 'If left as None, then moving averages are not used.')", "tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches =", "import Iterable, defaultdict from itertools import cycle import subprocess import PIL import math", "index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name)", "' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np,", "{} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in", "n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series", "utf-8 -*- # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
# #", "central_fraction): w = im.size[0] h = im.size[1] return central_crop(im, w * central_fraction, h", "= confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix')", "print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [", "for i in range(n_classes)])) # Then interpolate all ROC curves at this points", "test_models(config_file): with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True)", "%s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions,", "convert_model = False # convert_model = True if convert_model: extra_args = { 'resnet_v2_50':", "aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs',", "of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to", "saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 + 0.5) * 255).astype('uint8')", "overlap.std() > 128: color = np.array([0, 0, 255]).astype(float) / 255 # blue else:", "compliance with the License. 
# You may obtain a copy of the License", "w * central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py #", "result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\",", "= { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result", "None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2", "= tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),", "None def chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i +", "tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset # ###################### dataset =", "for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts =", "get_model_name(config) image_size = 224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size))", "is primarily used to ' 'evaluate the VGG and ResNet architectures which do", "i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) #", "area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw", "[labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label':", "= pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') 
input_tensor_shapes = { \"input:0\": [1,", "i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size):", "probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss,", "x = np.max(x, axis=2) x -= x.mean() std = x.std() if std: x", "with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def", "of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the", "_eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5')", "'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts", "{ \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch size is 1 output_tensor_name =", "im.crop( (half_w - w / 2, half_h - h / 2, half_w +", "== np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr = {} roc_auc", "use_cached=use_cached) from collections import Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs =", "-1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model =", "''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0])", "all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes):", "- h / 2, half_w + w / 2, half_h + h /", "* 
FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset", "= slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size)", "n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from", "preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '') == '': print('no", "keys = [ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs", "dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits", "labels, 5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix =", "= central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr =", "with open(config_file) as f: config = yaml.load(f) keys = [ 'confusion_matrix', ] aggregated", "] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names", "used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset", "# scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None,", "name of the preprocessing to use. 
If left ' 'as `None`, then the", "p: p[0]) for k, v in kv_pairs: print(k, v) def save_var(directory, file_name, info):", "def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config,", "'eval/%s' % name op = tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name)", "= sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config,", "'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of", "@cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f: config =", "# Create a dataset provider that loads data from the dataset # ##############################################################", "plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x) x", "target_std=0.15): # normalize tensor x = np.abs(x) x = np.max(x, axis=2) x -=", "= tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir", "i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png')", "{'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[", "softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, #", "in 
aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels =", "labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([", "color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int)", "def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10))", "central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -=", "[image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image ##################################### #", "use_cached): with open(config_file) as f: config = yaml.load(f) keys = [ 'confusion_matrix', ]", "Create a dataset provider that loads data from the dataset # ############################################################## provider", "auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] -", "('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames:", "get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i,", "'preprocessing_name', None, 'The name of the preprocessing to use. If left ' 'as", "coding: utf-8 -*- # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
#", "target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l)", "coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None):", "name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i]", "get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params", "absolute_import from __future__ import division from __future__ import print_function import click import yaml", "def confusion_matrix(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) keys = [", "np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') #", "preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '') ==", "shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label", "roc_auc = {} for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i],", "or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config)", "(the \"License\"); # you may not use this file except in compliance with", "_roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities', ]", "graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, 
graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config)", "canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir", "_eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter all_labels = aggregated['labels'] c = Counter(all_labels)", "u'酢漿草'), ] for filename, label in filenames: filename = dataset_dir_file(config, filename) # image_np", "pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w,", "% checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def,", "over all of the data. num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path", "# Unless required by applicable law or agreed to in writing, software #", "load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return aggregated calculate_confusion_matrix = True info", "by applicable law or agreed to in writing, software # distributed under the", "'batch_size', BATCH_SIZE, 'The number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max", "im.size[0] / 2 half_h = im.size[1] / 2 return im.crop( (half_w - w", "coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file =", "= label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix", "plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = 
np.zeros_like(confusion_matrix) mask[confusion_matrix == 0]", "tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the", "label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename): filename =", "= [ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs =", "use_cached): with open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def", "and # limitations under the License. # ============================================================================== \"\"\"Generic evaluation script that evaluates", "xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title and", "labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index", "= get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if __name__", "sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k in res.keys(): value =", "file except in compliance with the License. 
# You may obtain a copy", "cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] =", "matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN =", "'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config,", "FLAGS.max_num_batches else: # This ensures that we make a single pass over all", "keys = [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss',", "plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate')", "= os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in range(5):", "for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im,", "range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio =", "stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels in the dataset. 
This", "curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] =", "_eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config,", ") print('logits', logits) index = np.argmax(logits) print(\"Prediction label index:\", index) prediction_name = labels_to_names[index]", "def chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i + n]", "is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached)", "= '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np =", "[] for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return", "tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy(", "= dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path)", "# print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name =", "os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path", "# Select the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) ####################", "index:\", index) prediction_name = 
result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label index:\",", "y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive],", "= os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, )", "i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold =", "list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name,", "= info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config):", "_eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'],", "try: with h5py.File(info_file_path, 'r') as f: return { k: f[k][:] for k in", "model_name = get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average", "for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples =", "= im.size[1] return central_crop(im, w * central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False):", "get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset #", "= info['logits'] labels = info['labels'] predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels,", "255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row =", "the model was written to or an absolute path to a ' 'checkpoint", "orange paint[:, :] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint *", "left ' 'as `None`, then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None,", "macro-average ROC curve and ROC area # First aggregate all false positive rates", "print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure() colors", "'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0,", "dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, 
coreml_file_path=coreml_file_path,", "ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def", "im.size[0] h = im.size[1] return central_crop(im, w * central_fraction, h * central_fraction) def", "result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def", "config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file)", "cycle import subprocess import PIL import math import os from PIL import Image", "label_name.encode('utf-8'), prefix) if labels is not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img,", "/= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) #", "(half_w - w / 2, half_h - h / 2, half_w + w", "metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5),", "else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes", "dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg',", "'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [", "k not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else: if", "extra_args = { 'resnet_v2_50': { 
'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1':", "'The directory where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset", "roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset +", "(image_size, image_size)) # expand dims to shape [None, 299, 299, 3] image_np =", "w_offset = int((w - l) / 2) h_offset = int((h - l) /", "int((w - l) / 2) h_offset = int((h - l) / 2) roi_img", "alpha) * 255 overlap = roi_img[paint > 128] if overlap.mean() + overlap.std() >", "'The name of the preprocessing to use. If left ' 'as `None`, then", "tensor tensor_map = { 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0')", "mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output =", "OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True", "100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches',", "info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f: return { k:", "use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f: config", ">= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels", "-= FLAGS.labels_offset raw_images = image ##################################### # Select the 
preprocessing function # #####################################", "[1, image_np.shape[0], image_np.shape[1], 3]} # batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] +", "curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits',", "labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j", "= _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter all_labels = aggregated['labels'] c =", "itertools import cycle import subprocess import PIL import math import os from PIL", "= roi_img[paint > 128] if overlap.mean() + overlap.std() > 128: color = np.array([0,", "% checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits,", "= info['labels'] predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path)", "then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use", "image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np = pre_process(config, image_np) image_np", "run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config,", "frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename)", "% name op = tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name) 
tf.add_to_collection(tf.GraphKeys.SUMMARIES,", "import seaborn as sns set_matplot_zh_font() # ax = plt.subplot() fig, ax = plt.subplots()", "for i in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import", "/ 255 # blue else: color = np.array([255, 200, 0]).astype(float) / 255 #", "blue else: color = np.array([255, 200, 0]).astype(float) / 255 # orange paint[:, :]", "roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels)", "fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) #", "screen. for name, value in names_to_values.items(): summary_name = 'eval/%s' % name op =", "linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05])", "image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims to", "from itertools import cycle import subprocess import PIL import math import os from", "seaborn as sns set_matplot_zh_font() # ax = plt.subplot() fig, ax = plt.subplots() #", "roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary =", "plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for", "raw_images = image ##################################### # Select the preprocessing function # ##################################### preprocessing_name =", "= tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding(", "limitations under the License. 
# ============================================================================== \"\"\"Generic evaluation script that evaluates a model", "for row in image_table: for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i", "('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = dataset_dir_file(config, filename) #", "_R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN return arr", "= 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1',", "'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes), colors): label = 'ROC curve", "get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if __name__ ==", "common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -=", "by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master", "image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label']", "read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index", "roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path =", "2, half_h - h / 2, half_w + w / 2, half_h +", "{} roc_auc = {} for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i],", "TensorFlow Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version", "this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i],", "'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the architecture", "a given dataset.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for i in", "get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config ) return", "output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename,", "l] intensify_factor = 3 alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255,", "= np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro]", "import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if", "# Finally average it and compute AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr", "fm = FontManager() mat_fonts = set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list", "isinstance(v, tf.Tensor) and (not keys or k in keys) } try: feed_dict =", "# confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap", "variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = 
slim.get_variables_to_restore() predictions =", "'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of", "calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with", "np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes", "int((h - l) / 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l]", "_B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1] return", "= np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -= _R_MEAN arr[:, :, 1]", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h", "enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np = pre_process(config, image_np) image_np =", "{ 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size',", "[None, 299, 299, 3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with", "num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## #", "num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label'])", "summary_name = 'eval/%s' % name op = 
tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op,", "are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def", "\"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i + n] for i in", "dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels in", "plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config)", "optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr)", "np.argmax(logits) print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list =", "@click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) keys", "= variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits,", "'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC", "im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml: arr /=", "else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image]", "= tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # 
TODO(sguada)", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "in info.items(): f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py", "{ 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config))", "= os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From", "/= std x *= target_std x *= 255 x = np.clip(x, 0, 255).astype('uint8')", "else: aggregated[k].append(value) labels = res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length',", "name op = tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)", "make a single pass over all of the data. 
num_batches = math.ceil(dataset.num_samples /", "calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step()", "= 3 alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1)", "num_batches = info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path)", "'' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys", "function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False)", "= tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels)", "= 2 n_classes = len(possible_labels) # Compute macro-average ROC curve and ROC area", "target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not", "logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or", "= PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits,", "'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0,", "# num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image',", "== 
'confusion_matrix': if k not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] +=", "'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') tf.app.flags.DEFINE_integer(", "} return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config)", "index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0])", "aggregated is not None: return aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix)", "== label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config)", "file_name) try: with h5py.File(info_file_path, 'r') as f: return { k: f[k][:] for k", "session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession(", "def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path", "image_table: for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if", "= _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix = ''", "'w') for k, v in info.items(): f[k] = v f.close() print(info_file_path, 'saved') def", "try: os.makedirs(save_dir) except OSError: pass for j in range(n): image = images[j] grad_img", "color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0,", "# output_tensor_name = 
\"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name(", "'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated =", "3]} # batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model =", "filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np,", "= tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0]", "all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then", "% checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes =", "which do not use a background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string(", "= np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm')", "half_h = im.size[1] / 2 return im.crop( (half_w - w / 2, half_h", "pass over all of the data. 
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path =", "arr[:, :, 1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN return arr def", "the License for the specific language governing permissions and # limitations under the", "label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if", "http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds", "variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1)", "y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] =", "# ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename = dataset_dir_file(config, filename)", "model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file =", "names_to_values.items(): summary_name = 'eval/%s' % name op = tf.summary.scalar(summary_name, value, collections=[]) op =", "PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr -= 0.5", "of {}'.format(i, num_batches)) params = { k: v for k, v in info.items()", "coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names =", "def saliency_maps(config_file, use_cached): with open(config_file) as f: config = 
yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command()", "= sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k in res.keys(): value", "2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with", "tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series])", "checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg')", "2) h_offset = int((h - l) / 2) roi_img = canvas[w_offset:w_offset + l,", "or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np =", "'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config)", "= im.size[0] / 2 half_h = im.size[1] / 2 return im.crop( (half_w -", "tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: #", "x -= x.mean() std = x.std() if std: x /= std x *=", "examples = [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename))", "sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] +", "*= color roi_img = cv2.multiply(alpha, 
roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha),", "y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i])", "roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor = 3 alpha =", "output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) #", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for", "np.max(x, axis=2) x -= x.mean() std = x.std() if std: x /= std", "_run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ] aggregated", "0, 1) paint = np.copy(1 - alpha) * 255 overlap = roi_img[paint >", "tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1],", "2, half_w + w / 2, half_h + h / 2)) def pre_process_resnet(im,", "= get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path", "plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show()", "moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS =", "image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index", "in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import", "return [l[i:i + n] for i in range(0, 
len(l), n)] def save_saliency_maps(config, grad_imgs,", "= get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset", "cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix", "keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs,", "= True if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN,", "= chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row", "aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix =", "for filename, label in filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits", "nets import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib", "continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not", "use_cached): with open(config_file) as f: config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached',", "/ 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'],", "x *= target_std x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x", "v in info.items() if isinstance(v, tf.Tensor) and (not keys or k in keys)", "get_model_name(config): model_name = 
get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html", "matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '') == '': print('no display found.", "dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l", "target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:,", "= run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index)", "tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output", "raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix,", "tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5':", "session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax =", "pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg',", "= FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) 
images, labels = tf.train.batch(", "tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image}", "= (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create", "checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config,", "= {} roc_auc = {} for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i]", "= provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image ##################################### # Select the", "# 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config,", "= get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if __name__ == '__main__': define_tf_flags()", "f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',',", "((image / 2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency,", "= image_np[:, :] w_offset = int((w - l) / 2) h_offset = int((h", "monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax", "key_series = 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr =", "central_crop(im, w * 
central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py", "'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency,", "i, color in zip(range(n_classes), colors): label = 'ROC curve of class {0} (area", "resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml:", "feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None):", "= _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir)", "Version 2.0 (the \"License\"); # you may not use this file except in", "aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return aggregated calculate_confusion_matrix =", "{ 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore,", "+ w / 2, half_h + h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size", "This flag is primarily used to ' 'evaluate the VGG and ResNet architectures", "tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr[\"macro\"] =", "pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name", "= sess.graph.get_tensor_by_name( 
'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs':", "blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] +=", "coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs", "num_batches = FLAGS.max_num_batches else: # This ensures that we make a single pass", "'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes), colors): label = 'ROC", "from collections import Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(),", "slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else:", "examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size", "label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix ==", "= np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr -= 0.5 arr *=", "f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with", "return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in", "model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( 
preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image =", "ax = plt.subplot() fig, ax = plt.subplots() # the size of A4 paper", "probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator", "grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values':", "get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select", "logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction", "# config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as", "os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k, v in info.items(): f[k] =", "sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix", "image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims to shape [None, 299,", "/ 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor = 3", "chunks from l.\"\"\" return [l[i:i + n] for i in range(0, len(l), n)]", "= np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend", "tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 
'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates':", "116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def", "np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix,", "{0} (area = {1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw,", "labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path =", "range(n): image = images[j] grad_img = grad_imgs[j] label = labels[j] label_name = labels_to_names[label]", "h5py.File(info_file_path, 'w') for k, v in info.items(): f[k] = v f.close() print(info_file_path, 'saved')", "+ h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im,", "= run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction label", "None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})'", "preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR)", "num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image,", "if isinstance(v, tf.Tensor) and (not keys or k in keys) } try: feed_dict", "= read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index =", "logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix = 
tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix", "print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name ==", "label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path", "cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap", "= checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities =", "evaluates a model using a given dataset.\"\"\" from __future__ import absolute_import from __future__", "grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix,", "grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [", "roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset", "[available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize tensor x", "saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def cli(): pass @cli.command()", "import tensorflow as tf from tensorflow.python.training import monitored_session from datasets.plants import read_label_file from", "(area = {1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label)", "'The name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of", "aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = 
get_lastest_check_point(config) keys = [ 'labels',", "saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used to create the", "l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w", "255, 0, 1) paint = np.copy(1 - alpha) * 255 overlap = roi_img[paint", "label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches':", "prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for", "use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master to use.')", "print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config,", "__future__ import absolute_import from __future__ import division from __future__ import print_function import click", "= np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed(", "label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image ##################################### # Select", "k not in aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value)", "run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path", "os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038", "is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) 
_run_saliency_maps(config, use_cached=use_cached)", "for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples", "def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r')", "_roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f:", "'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss':", "def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key)", "'model_name', 'mobilenet_v1', 'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The", "# expand dims to shape [None, 299, 299, 3] image_np = np.expand_dims(image_np, 0)", "'%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes", "checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = {", "name:\", prediction_name) print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label", "or k in keys) } try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict)", "_run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images', 'logits',", "image ##################################### # Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or", "im1 
= central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr", "checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if", "} def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames =", "logits_list = info['logits'] labels = info['labels'] predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list,", "annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels')", "# limitations under the License. # ============================================================================== \"\"\"Generic evaluation script that evaluates a", "[image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the model", "= { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer(", "FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size", "example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config,", "fpr = {} roc_auc = {} for i in range(len(possible_labels)): y_scores = y_score_matrix[:,", "ResNet architectures which do not use a background ' 
'class for the ImageNet", "labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### #", "3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename):", "def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1,", "= list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result = { 'prediction_name':", "plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path", "[labels_to_names[i] if labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0)", "255]).astype(float) / 255 # blue else: color = np.array([255, 200, 0]).astype(float) / 255", "'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)", "f[k][:] for k in f.keys() } except IOError: return None def chunks(l, n):", "], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached):", "= tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print the summaries to", "# example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def", "np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k not in aggregated: aggregated[k] =", "def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ 
('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'),", "k, v in kv_pairs: print(k, v) def save_var(directory, file_name, info): import h5py info_file_path", "= yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f: config", "results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency,", "k, v in info.items(): f[k] = v f.close() print(info_file_path, 'saved') def load_var(directory, file_name):", "ensures that we make a single pass over all of the data. num_batches", "/ 2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image,", "每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] =", "Compute micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(),", "os.environ.get('DISPLAY', '') == '': print('no display found. Using non-interactive Agg backend') matplotlib.use('Agg') import", "os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f: return { k: f[k][:] for", "OF ANY KIND, either express or implied. 
# See the License for the", "not coreml: arr /= 255.0 arr -= 0.5 arr *= 2.0 return arr", "+ l, h_offset:h_offset + l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir =", "/ 2) h_offset = int((h - l) / 2) roi_img = canvas[w_offset:w_offset +", "config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with", "flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for the moving", "arr = np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr -= 0.5 arr", "def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] #", "top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result = {", "common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image #####################################", "optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure() colors =", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved. 
# # Licensed under", "the size of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix", "} except IOError: return None def chunks(l, n): \"\"\"Yield successive n-sized chunks from", "* saliency.astype(float) / 255, 0, 1) paint = np.copy(1 - alpha) * 255", "'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path", "None], y_score_matrix, 0) tpr = {} fpr = {} roc_auc = {} for", "np.matrix(value) else: if k not in aggregated: aggregated[k] = [] if isinstance(value, Iterable):", "pre_process(config, im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im,", "plt.subplots() # the size of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix", "the summaries to screen. 
for name, value in names_to_values.items(): summary_name = 'eval/%s' %", "2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name],", "paint[:, :] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1", "batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to evaluate by default use", "evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow", "prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i", "architectures which do not use a background ' 'class for the ImageNet dataset.')", "that loads data from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1,", "'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels':", "= roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path", "capacity=5 * FLAGS.batch_size) #################### # Define the model # #################### logits, _ =", "= 224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand", "calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print the", "'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch size is", "default use all.') tf.app.flags.DEFINE_string( 
'master', '', 'The address of the TensorFlow master to", "optimal_threshold) # Plot all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if", "if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix)", "coreml: arr /= 255.0 arr -= 0.5 arr *= 2.0 return arr def", "= slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset)", "tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS,", "arr[:, :, 2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0]", "'checkpoint_path', None, 'The directory where the model was written to or an absolute", "non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN =", "= OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor", "' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename)", "h5py.File(info_file_path, 'r') as f: return { k: f[k][:] for k in f.keys() }", "plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15, 10))", "import FontManager import matplotlib if os.environ.get('DISPLAY', '') == '': print('no display found. 
Using", "= deprocess_image(grad_img, target_std=0.3) restored_image = ((image / 2 + 0.5) * 255).astype('uint8') blend", "#################### # Define the model # #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay:", "read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j in range(n):", "pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size,", "**extra_args ) coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output:", "= results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np,", "= np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k not in aggregated: aggregated[k]", "with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n =", "'', 'The address of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The", "model # #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay,", "plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask =", "np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result", "'mobilenet_v1', 'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name", "= [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len labels',", "in 
range(0, len(l), n)] def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n = images.shape[0]", "= [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir", "labels = res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels", "np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:',", "n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row in image_table: for", "5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None", "one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the", "info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path", "def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path =", "under the License. 
# ============================================================================== \"\"\"Generic evaluation script that evaluates a model using", "= FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or", "參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"],", "'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0,", "'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images':", "threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the", "saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l = min(w, h)", "get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path", "label_name = labels_to_names[label] if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format(", "index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label index:\", '", "10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is", "= np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr", "= {} for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _", "average it and compute AUC mean_tpr /= n_classes 
fpr[\"macro\"] = all_fpr tpr[\"macro\"] =", "zip(range(n_classes), colors): label = 'ROC curve of class {0} (area = {1:0.2f})'.format( i,", "defaultdict from itertools import cycle import subprocess import PIL import math import os", "Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i", "= config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names", "im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml)", "'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert(", "open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def", "= {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw)", "coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':',", "confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis", "plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # sns.set(font_scale=1)", "where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the", "color = np.array([255, 200, 0]).astype(float) / 255 # orange paint[:, :] *= color", "master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model was written", 
"false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate", "print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels)))", "= network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables())", "image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} #", "tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing to use. If left '", "or agreed to in writing, software # distributed under the License is distributed", "f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np,", "params = { k: v for k, v in info.items() if isinstance(v, tf.Tensor)", "get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for f in", "dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string(", "] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions", "'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg',", "model using a given dataset.\"\"\" from __future__ import absolute_import from __future__ import division", "model was written to or an absolute path to a ' 'checkpoint file.')", "tf_global_step else: variables_to_restore = 
slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions,", "tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example)", "if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None):", "Select the model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn(", "image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/',", "], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated", "roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "tensor x = np.abs(x) x = np.max(x, axis=2) x -= x.mean() std =", "- intensify_factor * saliency.astype(float) / 255, 0, 1) paint = np.copy(1 - alpha)", "images, prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map", "image_np, coreml_file_path=None): import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file", "def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [", "# First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in", 
"governing permissions and # limitations under the License. # ============================================================================== \"\"\"Generic evaluation script", "for j in range(n): image = images[j] grad_img = grad_imgs[j] label = labels[j]", "label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top", "= '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config)", "License. # You may obtain a copy of the License at # #", "= auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"],", "the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for", "interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in", "return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0] / 2 half_h", "graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name", "input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) #", "np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns)", "/ min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l in im.size) return im.resize(target_size,", "= 224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr =", "target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0 arr -=", "-= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN return", 
"linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title", "return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names =", "= np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1) paint = np.copy(1", "labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir,", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path):", "0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): #", "expand dims to shape [None, 299, 299, 3] image_np = np.expand_dims(image_np, 0) graph", "labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', #", "#################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ##############################################################", "list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools import tfcoreml", "output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr =", "plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels =", "info['logits'] labels = info['labels'] 
predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions,", "name, value in names_to_values.items(): summary_name = 'eval/%s' % name op = tf.summary.scalar(summary_name, value,", "Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the", "in res.keys(): value = res[k] if k == 'confusion_matrix': if k not in", "as sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name]", "= checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch", "i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i]) # Finally average it and", "num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn,", "None, 'The decay to use for the moving average.' 
'If left as None,", "'labels': labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs':", "probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for i", "= plt.subplot() fig, ax = plt.subplots() # the size of A4 paper fig.set_size_inches(18,", "False def deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x) x = np.max(x,", "https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for f in fm.ttflist) output =", "= PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index =", "h) saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "If left ' 'as `None`, then the model_name flag is used.') tf.app.flags.DEFINE_float( 'moving_average_decay',", "from tensorflow.python.training import monitored_session from datasets.plants import read_label_file from datasets import dataset_factory from", "of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset files", "FLAGS.dataset_split_name, dataset_dir) #################### # Select the model # #################### num_classes = (dataset.num_classes -", "of the data. 
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config)", "test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), #", "+= 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve,", "1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix =", "= monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator)", "len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row in image_table: for col in", "-1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'),", "'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys,", "central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im,", "pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size", "target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -= _R_MEAN", "l) / 2) h_offset = int((h - l) / 2) roi_img = 
canvas[w_offset:w_offset", "as np import tensorflow as tf from tensorflow.python.training import monitored_session from datasets.plants import", "return im.crop( (half_w - w / 2, half_h - h / 2, half_w", "op = tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) #", "for the moving average.' 'If left as None, then moving averages are not", "list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) >", "probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits", "= cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset +", "= plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask", "info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions =", "h_offset:h_offset + l] intensify_factor = 3 alpha = np.clip(1 - intensify_factor * saliency.astype(float)", "FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore = slim.get_variables_to_restore()", "print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(),", "BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in each batch.')", "= tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) 
variables_to_restore[tf_global_step.op.name] = tf_global_step else: variables_to_restore", "= deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2)", "get output tensor tensor_map = { 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def", "for i in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:,", "deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group()", "coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True if convert_model: extra_args", "len(l), n)] def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n = images.shape[0] save_dir =", "dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),", "[ 'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list =", "im.size[1] / 2 return im.crop( (half_w - w / 2, half_h - h", "with open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True)", "= len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row in image_table: for col", "isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = 
res['labels'] print('len labels', len(labels)) all_labels =", "and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"],", "= read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs", "for filename, label in filenames: filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename)", "color = np.array([0, 0, 255]).astype(float) / 255 # blue else: color = np.array([255,", "p[0]) for k, v in kv_pairs: print(k, v) def save_var(directory, file_name, info): import", "'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter all_labels =", "dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model # #################### num_classes =", "coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction label index:\", index) prediction_name =", "tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue',", "cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True", "key=lambda p: p[0]) for k, v in kv_pairs: print(k, v) def save_var(directory, file_name,", "im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0] / 2", "* central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的", "\"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch 
size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name]", "= set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True)", "1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path =", "np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr =", "-_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale':", "script that evaluates a model using a given dataset.\"\"\" from __future__ import absolute_import", "checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix,", "f in output.split('\\n')) available = list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available", "= 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml:", "= True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path", "range(0, len(l), n)] def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n = images.shape[0] save_dir", "= fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:',", "index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name == label def", "= coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0' probs = 
coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten()", "checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits)", "blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False,", "= preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size,", "# 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875) target_smallest_size = 224", "filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path,", "FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model # #################### num_classes = (dataset.num_classes", "'Max number of batches to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '',", "tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in", "'{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not None else prefix, label_count_map[label]) saliency", "= Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k, v in kv_pairs:", "= tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name =", "##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = 
preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size =", "matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN", "k, v in info.items() if isinstance(v, tf.Tensor) and (not keys or k in", "= sorted(dict(c).items(), key=lambda p: p[0]) for k, v in kv_pairs: print(k, v) def", "@cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command()", "in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params = { k: v for", "or implied. # See the License for the specific language governing permissions and", "info['labels'] predictions = info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return", "open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file,", "display found. 
Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim =", "if aggregated is not None: return aggregated calculate_confusion_matrix = True info = get_info(config,", "im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0] / 2 half_h =", "info): import h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k,", "of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing", "in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k not", "nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY',", "= os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False):", "in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0] /", "= _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions = info['predictions']", "as None, then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image", "as sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params =", "w / 2, half_h - h / 2, half_w + w / 2,", "+ overlap.std() > 128: color = np.array([0, 0, 255]).astype(float) / 255 # blue", "aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else: if k not in aggregated:", "color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':',", "math import os from PIL import Image import cv2 import numpy as np", 
"the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select", "of threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of", "info['predictions'] probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir", "for the labels in the dataset. This flag is primarily used to '", "file_name) f = h5py.File(info_file_path, 'w') for k, v in info.items(): f[k] = v", "'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number", "tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels in the dataset. This flag", "image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads,", "read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg',", "run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index]", "range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params = { k: v for k,", "grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend,", "are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels in the dataset.", "if not coreml: arr[:, :, 0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN", "pic_path = 
os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): #", "length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path", "############################################################## # Create a dataset provider that loads data from the dataset #", "config = yaml.load(f) keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached)", "to use for the moving average.' 'If left as None, then moving averages", "read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits,", "specific language governing permissions and # limitations under the License. # ============================================================================== \"\"\"Generic", "FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key)", "= get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator =", "mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted", "labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else i for", "l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w, h): half_w = im.size[0]", "output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = 
sess.run(tensor_map, feed_dict={input_tensor: image_np})", "}, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255.,", "= int((h - l) / 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset +", "np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)]))", "yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f:", "names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if", "= [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples))", "* target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l in im.size)", "a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved", "= 1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l", "'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model", "with open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file):", "This ensures that we make a single pass over all of the data.", "image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction label index:\", index) prediction_name", "index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, 
frozen_graph_path=None, coreml_file_path=None):", "'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0", "alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img return canvas def", "return available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] =", "logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), }", "aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len", "Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC", "use this file except in compliance with the License. # You may obtain", "{ 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name')", "the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing to", "= [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs',", "chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row in", "'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys =", "res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length',", "+ l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir =", "Using non-interactive Agg 
backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN", "* FLAGS.batch_size) #################### # Define the model # #################### logits, _ = network_fn(images)", "# cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] =", "traceback traceback.print_exc() raise for k in res.keys(): value = res[k] if k ==", "Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "tensor_map = { 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result", "to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model was written to", "'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), }", "to screen. 
for name, value in names_to_values.items(): summary_name = 'eval/%s' % name op", "roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel()", "= aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5',", "plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path,", "micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure()", "file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False):", "cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in zip(range(n_classes), colors): label =", "OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor", "= {} fpr = {} roc_auc = {} for i in range(len(possible_labels)): y_scores", "= result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert", "np import tensorflow as tf from tensorflow.python.training import monitored_session from datasets.plants import read_label_file", "aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) #", "ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string(", "for the specific language 
governing permissions and # limitations under the License. #", "prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency)", "'dataset_split_name', 'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory", "'dataset_name', 'plants', 'The name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The", "} try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc()", "return arr def pre_process(config, im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet,", "} if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return", "the preprocessing to use. 
If left ' 'as `None`, then the model_name flag", "def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio", "= coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names", "[] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples", "[] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len labels', len(labels))", "np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend =", "mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series = 'highest_probability' i_optimal_micro", "= slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics:", "dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The name of the architecture to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name',", "name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset", "Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits,", "}[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs", "= 
np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l = min(w, h) saliency =", "cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w - l) / 2) h_offset", "PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0]", "# Print the summaries to screen. for name, value in names_to_values.items(): summary_name =", "coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2]", "the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing(", "image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image,", "Select the dataset # ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### #", "-f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n')) available =", "normalize tensor x = np.abs(x) x = np.max(x, axis=2) x -= x.mean() std", "# http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ]", "used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for the moving average.' 
'If", "in keys) } try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except: import", "key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config):", "= v f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory,", "predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes - FLAGS.labels_offset) labels =", "# convert_model = True if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN,", "dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model # ####################", "else: # This ensures that we make a single pass over all of", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "tensorflow as tf from tensorflow.python.training import monitored_session from datasets.plants import read_label_file from datasets", "h5py info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f: return {", "# From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for f in fm.ttflist)", "= sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get", "results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used to", "key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr =", "single pass over all of the data. 
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path", "datasets.plants import read_label_file from datasets import dataset_factory from nets import nets_factory from preprocessing", "in names_to_values.items(): summary_name = 'eval/%s' % name op = tf.summary.scalar(summary_name, value, collections=[]) op", "l) / 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor =", "i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def", "image_np[:, :] w_offset = int((w - l) / 2) h_offset = int((h -", "to or an absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/',", "[saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if", "run_info(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file')", "arr /= 255.0 arr -= 0.5 arr *= 2.0 return arr def pre_process(config,", "load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir', None,", "import cv2 import numpy as np import tensorflow as tf from tensorflow.python.training import", "= res[k] if k == 'confusion_matrix': if k not in aggregated: aggregated[k] =", "as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file)", "= get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None:", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': 
logits.tolist(), } return", "image_size)) # expand dims to shape [None, 299, 299, 3] image_np = np.expand_dims(image_np,", "_plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples =", "return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images',", "saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list,", "tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html", "= np.max(x, axis=2) x -= x.mean() std = x.std() if std: x /=", "= { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch size is 1 output_tensor_name", "FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy':", "- l) / 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor", "nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider that loads data", "optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange',", "backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN", "OSError: pass for j in range(n): image = images[j] grad_img = grad_imgs[j] label", "tpr = {} fpr = {} roc_auc = {} for i in range(len(possible_labels)):", "ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for", "is used.') tf.app.flags.DEFINE_float( 'moving_average_decay', None, 'The decay to use for 
the moving average.'", "target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -= _R_MEAN arr[:,", "curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1],", "@click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f: config = yaml.load(f)", "n = confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion", "absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the", "with the License. # You may obtain a copy of the License at", "# https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent')", "for f in output.split('\\n')) available = list(mat_fonts & zh_fonts) return available def set_matplot_zh_font():", "pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[", "= cv2.resize(image_np, (image_size, image_size)) # expand dims to shape [None, 299, 299, 3]", "# Plot all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes:", "= np.abs(x) x = np.max(x, axis=2) x -= x.mean() std = x.std() if", "filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename,", "title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if", "k: v for k, v in info.items() if isinstance(v, tf.Tensor) and (not keys", "if std: x /= std x *= target_std x *= 
255 x =", "use a background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1', 'The", "mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix,", "micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC", "size of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix <", "= get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] =", "std = x.std() if std: x /= std x *= target_std x *=", "tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = {", "= plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True #", "= { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': {", "dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size)", "np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\"", "input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False)", "# normalize tensor x = np.abs(x) x = np.max(x, axis=2) x -= x.mean()", "result['prediction_name'] print(\"Prediction name:\", prediction_name) 
print(\"Top 3 Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name", "law or agreed to in writing, software # distributed under the License is", "as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached):", "BATCH_SIZE, 'The number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number", "[] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def", "!= 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"],", "filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np,", "'blue_bias': -1.0, 'image_scale': 2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel',", "import yaml from collections import Iterable, defaultdict from itertools import cycle import subprocess", "number of batches to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The", "get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', #", "'moving_average_decay', None, 'The decay to use for the moving average.' 
'If left as", "monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def", "None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir') def", "FLAGS.batch_size) #################### # Define the model # #################### logits, _ = network_fn(images) if", "target_size = tuple(int(resize_ratio * l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def", "get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for", "subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f in output.split('\\n'))", "= labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i in", "= OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model =", "= 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE", "def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 = central_crop_by_fraction(im, 0.875)", "plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize", "labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int)", "yaml.load(f) _roc_analysis(config, 
use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as", "or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return", "ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else", "= labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format(", "# common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE,", "keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions = info['predictions'] probabilities =", "+ \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name(", "print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction", "image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims to shape [None, 299, 299,", "in image_table: for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1", "as tf from tensorflow.python.training import monitored_session from datasets.plants import read_label_file from datasets import", "The TensorFlow Authors. All Rights Reserved. 
# # Licensed under the Apache License,", "deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x) x = np.max(x, axis=2) x", "import numpy as np import tensorflow as tf from tensorflow.python.training import monitored_session from", "= res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique", "len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached:", "= 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in each batch.') tf.app.flags.DEFINE_integer(", "[ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ]", "{ k: v for k, v in info.items() if isinstance(v, tf.Tensor) and (not", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name']", "tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to evaluate by default use all.')", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "label = labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10: continue file_name =", "'master', '', 'The address of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None,", "# Select the model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn =", "\":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name)", "to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results 
are", "w / 2, half_h + h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size =", "is not None: return aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches", "std x *= target_std x *= 255 x = np.clip(x, 0, 255).astype('uint8') return", "import Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p:", "# ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2", "if overlap.mean() + overlap.std() > 128: color = np.array([0, 0, 255]).astype(float) / 255", "the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for the labels", "get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss =", "= Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]} # batch", "l = min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB)", "# batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file)", "= plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap =", "tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir =", "'labels_offset', 0, 'An offset for the labels in the dataset. 
This flag is", "keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config)", "sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map, feed_dict={input_tensor: image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'),", "1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image,", "image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits,", "from matplotlib.font_manager import FontManager import matplotlib if os.environ.get('DISPLAY', '') == '': print('no display", "print(k, v) def save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory, file_name) f", "0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"])", "central_crop_by_fraction(im, 0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32)", "+ l, h_offset:h_offset + l] intensify_factor = 3 alpha = np.clip(1 - intensify_factor", "slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix", "use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures that we", "k: f[k][:] for k in f.keys() } except IOError: return None def chunks(l,", "of class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label = None 
plt.plot(fpr[i], tpr[i],", "canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor = 3 alpha = np.clip(1 -", "datasets import dataset_factory from nets import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager", "in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to evaluate by", "= set(f.split(',', 1)[0] for f in output.split('\\n')) available = list(mat_fonts & zh_fonts) return", "with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def", "tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file", "num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures that we make", "def central_crop(im, w, h): half_w = im.size[0] / 2 half_h = im.size[1] /", "cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label", "axis=1)[:, None], y_score_matrix, 0) tpr = {} fpr = {} roc_auc = {}", "raise for k in res.keys(): value = res[k] if k == 'confusion_matrix': if", "# sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap)", "softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return {", "checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np)", "'predictions', 
'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections", "= 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass", "from collections import Iterable, defaultdict from itertools import cycle import subprocess import PIL", "10)) i = 1 for row in image_table: for col in row: plt.subplot(n_row,", "'max_num_batches', None, 'Max number of batches to evaluate by default use all.') tf.app.flags.DEFINE_string(", "provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images = image ##################################### # Select the preprocessing", "= resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not", "labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = []", "#################### # Select the model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn", "get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def cli(): pass", "image_np, saliency, ], 2) @click.group() def cli(): pass @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def", "tf.Tensor) and (not keys or k in keys) } try: feed_dict = {}", "keys=keys, use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names,", "tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', 
optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all", "plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i", "graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) return _run_inference_by_graph_def(config, graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False):", "aggregated[k] += np.matrix(value) else: if k not in aggregated: aggregated[k] = [] if", "Plot all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for", "checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator", "probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None):", "} def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, # scaffold=scaffold, # master=master, # config=config", "{ k: f[k][:] for k in f.keys() } except IOError: return None def", "# #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step)", "length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated", "1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate')", "is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = 
coremltools.models.MLModel(coreml_model_file) convert_model = False", "plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]),", "graph_def, image_np) def _run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224", "dataset_dir) #################### # Select the model # #################### num_classes = (dataset.num_classes - FLAGS.labels_offset)", "/ 2, half_w + w / 2, half_h + h / 2)) def", "labels = tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions,", "prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map =", "pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def =", "and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names", "filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path", "= os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names = read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results", "res.keys(): value = res[k] if k == 'confusion_matrix': if k not in aggregated:", "i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr", "None, 'The name of the preprocessing to use. 
If left ' 'as `None`,", "np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this", "Iterable, defaultdict from itertools import cycle import subprocess import PIL import math import", "tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2", "VGG and ResNet architectures which do not use a background ' 'class for", "in f.keys() } except IOError: return None def chunks(l, n): \"\"\"Yield successive n-sized", "im.size[1] return central_crop(im, w * central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): #", "central_fraction, h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "plot_all_classes: for i, color in zip(range(n_classes), colors): label = 'ROC curve of class", "return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for i in range(5):", "= y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve(", "FLAGS.labels_offset raw_images = image ##################################### # Select the preprocessing function # ##################################### preprocessing_name", "half_w = im.size[0] / 2 half_h = im.size[1] / 2 return im.crop( (half_w", "'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config,", "'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias': -1.0, 'image_scale': 2.0 / 255., }", "# key_series = 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - 
fpr[key_series]) optimal_threshold_fpr", "PIL import math import os from PIL import Image import cv2 import numpy", "successive n-sized chunks from l.\"\"\" return [l[i:i + n] for i in range(0,", "Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png')", "h_offset:h_offset + l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir", "is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels", "cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l,", "\"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\" from __future__", "roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel", "do not use a background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name',", "dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np = PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits']", "n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1 for row in image_table:", "available = list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font() if", "0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" #", "(not keys or k in keys) } try: feed_dict = {} res =", "= dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, 
coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency):", "-= _G_MEAN arr[:, :, 2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w", "y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels) #", "_R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50':", "checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions',", "= images[j] grad_img = grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if label_count_map[label]", "labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import", "subprocess import PIL import math import os from PIL import Image import cv2", "this file except in compliance with the License. 
# You may obtain a", "dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np", "1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l in", "yaml from collections import Iterable, defaultdict from itertools import cycle import subprocess import", "= {} res = sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k", "f: config = yaml.load(f) keys = [ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys,", "in range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores)", "scaffold=scaffold, # master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'):", "arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h = im.size[1] return central_crop(im, w", "return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'),", "= coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image =", "0] -= _R_MEAN arr[:, :, 1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN", "as f: return { k: f[k][:] for k in f.keys() } except IOError:", "run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list =", "= tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record)", "= 
labels_to_names[index] grad_imgs = results['grad_imgs'] saliency = deprocess_image(grad_imgs[0]) blend = get_image_with_saliency_map(image_np, saliency) print(prediction_name)", "to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset to", "}[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None,", "model_name, num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider that loads data from", "that we make a single pass over all of the data. num_batches =", "'_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs,", "1) paint = np.copy(1 - alpha) * 255 overlap = roi_img[paint > 128]", "info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k, v in info.items():", "u'酢漿草'), ] for filename, label in filenames: filename = os.path.join(dataset_dir, filename) image_np =", "'{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not None else prefix,", "cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w - l) / 2)", "filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path =", "use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached): with open(config_file) as f: config", "'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 
'images': images, 'raw_images': raw_images, 'network_fn':", "def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path,", "are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used to create", "checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step =", "moving average.' 'If left as None, then moving averages are not used.') tf.app.flags.DEFINE_integer(", "read_label_file from datasets import dataset_factory from nets import nets_factory from preprocessing import preprocessing_factory", "= run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name =", "probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels", "row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name, 'saved')", "def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for i in range(5): tfrecords_filename", "print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts =", "output_tensor_name) # get output tensor tensor_map = { 'logits': output_tensor, } if enable_saliency_maps:", "file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics", "+ 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ],", 
"@click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f: config = yaml.load(f)", "'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir", "preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images,", "except: import traceback traceback.print_exc() raise for k in res.keys(): value = res[k] if", "names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits,", "or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config, image_np, coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB')", "= read_label_file(dataset_dir) image_np = PIL.Image.open(filename) results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits']", "all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for", "dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if __name__ == '__main__':", "= [] for i in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples))", "get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h = image_np.shape[0:2] l = min(w,", "print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, '", "# 
#################### num_classes = (dataset.num_classes - FLAGS.labels_offset) network_fn = nets_factory.get_network_fn( model_name, num_classes=num_classes, is_training=False)", "= np.array([255, 200, 0]).astype(float) / 255 # orange paint[:, :] *= color roi_img", "def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path')", "for i, color in zip(range(n_classes), colors): label = 'ROC curve of class {0}", "= results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs = results['grad_imgs'] saliency", "103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE =", "@cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def run_info(config_file, use_cached): with open(config_file) as f: config =", "labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if", "result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3", "logits) index = np.argmax(logits) print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\",", "= sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map = { 'logits': output_tensor, }", "f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as", "cv2.resize(image_np, (image_size, image_size)) # expand dims to shape [None, 299, 299, 3] image_np", "cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # 
sns.set(font_scale=1) with sns.axes_style('darkgrid'):", "label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area", "index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i", "dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size /", "checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as", "print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits)", "return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() #", "given dataset.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "= np.clip(x, 0, 255).astype('uint8') return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list,", "y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr = {}", "plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"], label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0,", "= get_image_with_saliency_map(image_np, saliency) print(prediction_name) plot_image_in_grids([ blend, image_np, saliency, ], 2) @click.group() def cli():", "= {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 
'resnet_v2_50__predictions__Reshape_1__0' probs =", "a model using a given dataset.\"\"\" from __future__ import absolute_import from __future__ import", "yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title and ticks", "examples = [] for string_record in record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) #", "info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess:", "tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"],", "grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10: continue file_name", "probabilities = info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir =", "not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image /", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map =", "confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator( checkpoint_filename_with_path=checkpoint_path, #", "= { k: v for k, v in info.items() if isinstance(v, tf.Tensor) and", "using a given dataset.\"\"\" from __future__ import absolute_import from __future__ import division from", "== label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def 
run_inference_by_pb(config,", "3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess:", "First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))", "in range(n): image = images[j] grad_img = grad_imgs[j] label = labels[j] label_name =", "use_cached=use_cached) logits_list = info['logits'] labels = info['labels'] predictions = info['predictions'] probabilities = info['probabilities']", "index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction", "label='ROC curve (area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0,", "info['probabilities'] _plot_roc(logits_list, labels, predictions, probabilities, save_dir=checkpoint_dir_path) return def inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples", "get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename)", "{1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i], tpr[i], color=color, lw=lw, label=label) plt.plot(fpr[\"highest_probability\"], tpr[\"highest_probability\"],", "range(len(possible_labels)): y_scores = y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i]", "'__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) image_np =", "auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0", "= 
h5py.File(info_file_path, 'w') for k, v in info.items(): f[k] = v f.close() print(info_file_path,", "import PIL import math import os from PIL import Image import cv2 import", "= get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys,", "h * central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1", "labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes,", "file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency, image, file_name=None): plt.figure(figsize=(15,", "import matplotlib if os.environ.get('DISPLAY', '') == '': print('no display found. Using non-interactive Agg", "output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map = { 'logits': output_tensor,", ") coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example output: 'resnet_v2_50__predictions__Reshape_1__0'", "* 255 overlap = roi_img[paint > 128] if overlap.mean() + overlap.std() > 128:", "test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg',", "required by applicable law or agreed to in writing, software # distributed under", "'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path):", "print('no display found. 
Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim", "size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model =", "# 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 * FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label]", "eval_image_size) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size)", "i in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None): import coremltools", "print('optimal_threshold:', optimal_threshold) # Plot all ROC curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])", "def _run_info(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', # 'raw_images',", "'labels', 'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images", "c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k, v in", "directory where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An offset for", "FLAGS.batch_size, common_queue_min=FLAGS.batch_size) # common_queue_min=FLAGS.batch_size) [image, label] = provider.get(['image', 'label']) label -= FLAGS.labels_offset raw_images", "= None # Print the summaries to screen. 
for name, value in names_to_values.items():", "of A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01,", "= aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels'])", "= aggregated['labels'] c = Counter(all_labels) kv_pairs = sorted(dict(c).items(), key=lambda p: p[0]) for k,", "'.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config, image_np, coreml_file_path=None):", "labels in the dataset. This flag is primarily used to ' 'evaluate the", "confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap =", "index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names =", "h_offset = int((h - l) / 2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset", "i], y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive", "aggregated['labels'] print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated)", "plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir,", "output.split('\\n')) available = list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font()", "[l[i:i + n] for i in range(0, len(l), n)] def save_saliency_maps(config, grad_imgs, images,", "= ((image / 2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) 
plot_image_in_grids([", "< 0.01, # confusion_matrix) cmap = plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap", "None, then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size')", "Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name ==", "\":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True if convert_model:", "coreml=False): target_smallest_size = 224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size)", "= get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config, pb_file_path=None, dataset_dir=None): # http://www.cnblogs.com/arkenstone/p/7551270.html filenames =", "and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i]", "op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This", "# ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size", "filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits)", "'confusion_matrix': if k not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value)", "'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits':", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = 
get_model_name(config) # tf.logging.set_verbosity(tf.logging.INFO)", "roi_img[paint > 128] if overlap.mean() + overlap.std() > 128: color = np.array([0, 0,", "was written to or an absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string(", "= os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 *", "not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config):", "# orange paint[:, :] *= color roi_img = cv2.multiply(alpha, roi_img.astype(float)) roi_img = cv2.add(paint", "def run_info(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _run_info(config, use_cached=use_cached) @cli.command()", "# Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn", "calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {}", "############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False, common_queue_capacity=2 *", "the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where the model", "info.items() if isinstance(v, tf.Tensor) and (not keys or k in keys) } try:", "{ 'logits': output_tensor, } if enable_saliency_maps: tensor_map['grad_imgs'] = sess.graph.get_tensor_by_name( 'gradients/MobilenetV1/MobilenetV1/Conv2d_0/Conv2D_grad/Conv2DBackpropInput:0') result = sess.run(tensor_map,", "image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config) pb_file_path = pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path 
with", "from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) +", "points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])", "convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, },", "prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path =", "/ 2, half_h + h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224", "> 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15):", "optimal_threshold_tpr = tpr[key_series][i_optimal_micro] optimal_threshold = micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) #", "save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'), prefix) if labels is not None else prefix, label_count_map[label])", "to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test split.') tf.app.flags.DEFINE_string( 'dataset_dir',", "image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs", "dataset_dir=dataset_dir) index = result['prediction_label'] print(\"Prediction label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\",", "number of threads used to create the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name", "labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])]))", "aggregated[k].append(value) labels = res['labels'] 
print('len labels', len(labels)) all_labels = aggregated['labels'] print('all_labels length', len(all_labels))", "'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name def", "get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return", "tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels) # Compute macro-average ROC curve and", "= 'micro' key_series = 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro]", "plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions',", "model_name] + \":0\" input_tensor = sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor =", "None, 'The directory where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0, 'An", "zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif']", "= tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0':", "= os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f: return { k: f[k][:]", "'resnet_v2_50__predictions__Reshape_1__0' probs = coreml_output[ output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None,", "= central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0]", 
"y_score_matrix = output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0)", "index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config),", "'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config, 'dataset_dir')", "ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])", "print_function import click import yaml from collections import Iterable, defaultdict from itertools import", "print('all_labels length', len(all_labels)) print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return", "res[k] if k == 'confusion_matrix': if k not in aggregated: aggregated[k] = np.matrix(value)", "read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs =", "get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or", "y_scores) roc_auc[i] = auc(fpr[i], tpr[i]) # 參考 http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html y_score_matrix_ravel = y_score_matrix.ravel() i_positive =", "a dataset provider that loads data from the dataset # ############################################################## provider =", "cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset =", "# labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis =", "image_size = 
224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) #", "= FLAGS.max_num_batches else: # This ensures that we make a single pass over", "l, h_offset:h_offset + l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path']", "image_input_names=['input:0'], **extra_args ) coreml_inputs = {'input__0': image} coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=False) # example", "except IOError: return None def chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\"", "label index:\", index) prediction_name = result['prediction_name'] print(\"Prediction name:\", prediction_name) print(\"Top 3 Prediction label", "- FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({", "from PIL import Image import cv2 import numpy as np import tensorflow as", "_ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore(", "examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name", "tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds =", "return None def chunks(l, n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i", "and compute AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"]", "aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def _run_info(config, use_cached=False): checkpoint_path", "tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, 
logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return", ":, 1] -= _G_MEAN arr[:, :, 2] -= _B_MEAN return arr def central_crop_by_fraction(im,", "available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]]", "A4 paper fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, #", "rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font():", "get_config_value(config, 'dataset_dir') def get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return", "# you may not use this file except in compliance with the License.", "= tuple(int(resize_ratio * l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im,", "if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name] =", "images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 'names_to_values': names_to_values, 'names_to_updates': names_to_updates,", "= micro_thresholds[i_optimal_micro] print('optimal_threshold_fpr:', optimal_threshold_fpr) print('optimal_threshold_tpr:', optimal_threshold_tpr) print('optimal_threshold:', optimal_threshold) # Plot all ROC curves", "filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path, dataset_dir=dataset_dir) index", "import absolute_import from __future__ import division 
from __future__ import print_function import click import", "dataset_factory from nets import nets_factory from preprocessing import preprocessing_factory from matplotlib.font_manager import FontManager", "coreml=True) image = Image.fromarray(image_np.astype('int8'), 'RGB') input_tensor_shapes = { \"input:0\": [1, image_np.shape[0], image_np.shape[1], 3]}", "checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities', ] info =", "results = run_inference_by_pb(config, image_np, pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name", "predictions=predictions) else: confusion_matrix = None # Print the summaries to screen. for name,", "logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path,", "range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot", "index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name == label def run_inference_by_coreml(config,", "use for the moving average.' 
'If left as None, then moving averages are", "#{} of {}'.format(i, num_batches)) params = { k: v for k, v in", "eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size) images, labels =", "& zh_fonts) return available def set_matplot_zh_font(): available = get_matplot_zh_font() if len(available) > 0:", "plt.title('ROC curve') plt.legend(loc=\"lower right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve", "255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes, output_feature_names=[output_tensor_name], image_input_names=['input:0'], **extra_args", "sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches)) params = {", "possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix", "image = images[j] grad_img = grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if", "col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name)", "'images', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images =", "area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i", "_G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT = { 'resnet_v2_50': 'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1',", "-*- coding: utf-8 -*- # Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.", "k in keys) } try: feed_dict = {} res = sess.run(params, feed_dict=feed_dict) except:", "summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else:", "dataset.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import print_function", "'saved') def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path,", "auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds", "= dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result = run_inference_on_file_pb( config, filename, pb_file_path=pb_file_path,", "for i in list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0],", "'image_scale': 2.0 / 255., } }[model_name] coreml_model = tfcoreml.convert( tf_model_path=frozen_model_file, mlmodel_path=coreml_model_file.replace('.mlmodel', '_test.mlmodel'), input_name_shape_dict=input_tensor_shapes,", "checkpoint_dir_path = get_checkpoint_dir_path(config) if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not", "License for the specific language governing permissions and # limitations under the License.", "cmap=cmap) n = confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels')", "tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None,", "sns set_matplot_zh_font() # ax = plt.subplot() fig, ax = plt.subplots() # the size", "or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = 
FLAGS.eval_image_size or network_fn.default_image_size image", "tf.app.flags.DEFINE_string( 'dataset_dir', None, 'The directory where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset',", "summaries to screen. for name, value in names_to_values.items(): summary_name = 'eval/%s' % name", "images[j] grad_img = grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if label_count_map[label] >=", "filename = os.path.join(get_dataset_dir(config), filename) return filename def run_inference_by_pb(config, image_np, pb_file_path=None): checkpoint_dir_path = get_checkpoint_dir_path(config)", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "as plt slim = tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN =", "= subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"', shell=True) zh_fonts = set(f.split(',', 1)[0] for f in", "division from __future__ import print_function import click import yaml from collections import Iterable,", "if k not in aggregated: aggregated[k] = np.matrix(value) else: aggregated[k] += np.matrix(value) else:", "0.875) target_smallest_size = 224 im2 = im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if", "] for filename, label in filenames: filename = os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename)", "of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test", "= tf.train.batch( [image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define", "assert prediction_name == label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename", "dataset_dir = 
get_dataset_dir(config) examples = [] for i in range(5): tfrecords_filename = os.path.join(", "run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits', logits) index = np.argmax(logits) print(\"Prediction label index:\",", "image_np.shape[1], 3]} # batch size is 1 output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[model_name] + \":0\" coreml_model", "as f: config = yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file,", "predictions, dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates", "path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 'eval_dir', '/tmp/tfmodel/', 'Directory where the results", "= tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for", "Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn =", "arr -= 0.5 arr *= 2.0 return arr def pre_process(config, im, coreml=False): model_name", "label, label_name.encode('utf-8'), prefix) if labels is not None else prefix, label_count_map[label]) saliency =", "collections import Iterable, defaultdict from itertools import cycle import subprocess import PIL import", "= False def deprocess_image(x, target_std=0.15): # normalize tensor x = np.abs(x) x =", "= y_score_matrix[:, i] fpr[i], tpr[i], _ = roc_curve(y_binary[:, i], y_scores) roc_auc[i] = auc(fpr[i],", "address of the TensorFlow master to use.') tf.app.flags.DEFINE_string( 'checkpoint_path', None, 'The directory where", "to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of the", "y_binary.ravel()[i_positive], 
y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve and ROC", "use_cached=use_cached) checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir = get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path)", "get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches'] aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config)", "config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns", "compute AUC mean_tpr /= n_classes fpr[\"macro\"] = all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] =", "label = 'ROC curve of class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label", "'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels, 5), }) if calculate_confusion_matrix: confusion_matrix =", "aggregated: aggregated[k] = [] if isinstance(value, Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels']", "names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels,", "at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += np.interp(all_fpr,", "all.') tf.app.flags.DEFINE_string( 'master', '', 'The address of the TensorFlow master to use.') tf.app.flags.DEFINE_string(", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "+ n] for i in range(0, len(l), n)] def save_saliency_maps(config, 
grad_imgs, images, prefix='',", "in writing, software # distributed under the License is distributed on an \"AS", "get_config_value(config, key): return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def", "tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0", "2 half_h = im.size[1] / 2 return im.crop( (half_w - w / 2,", "+ l] intensify_factor = 3 alpha = np.clip(1 - intensify_factor * saliency.astype(float) /", "roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels) # Compute macro-average", "import h5py info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f: return", "return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config,", "arr *= 2.0 return arr def pre_process(config, im, coreml=False): model_name = get_model_name(config) return", "plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm =", "f: return { k: f[k][:] for k in f.keys() } except IOError: return", "= get_model_name(config) image_size = 224 image_np = pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size,", "images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir) except", "1) top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits) result =", "len(possible_labels) # Compute macro-average ROC curve and ROC area # First aggregate 
all", "'resnet_v2_50/predictions/Reshape_1', 'mobilenet_v1': 'MobilenetV1/Predictions/Reshape_1', } def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The", "2) roi_img = canvas[w_offset:w_offset + l, h_offset:h_offset + l] intensify_factor = 3 alpha", "'labels', 'images', # 'raw_images', 'logits', 'probabilities', 'predictions', 'confusion_matrix', # 'loss', 'grad_imgs', ] aggregated", "= np.argmax(logits) print(\"Prediction label index:\", index) prediction_name = labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list", "i in list(index_list[0])])) print('logits', logits) result = { 'prediction_name': prediction_name, 'prediction_label': index[0], 'top_n_names':", "import coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb'", "def pre_process(config, im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet,", "2] -= _B_MEAN return arr def central_crop_by_fraction(im, central_fraction): w = im.size[0] h =", "ax.set_xlabel('Predicted labels') ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else i", "the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the train/test split.')", "y_score_matrix, 0) tpr = {} fpr = {} roc_auc = {} for i", "color in zip(range(n_classes), colors): label = 'ROC curve of class {0} (area =", "image, file_name=None): plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None,", "image_np}) return { 'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names", "aggregated = _eval_tensors(config, 
keys=keys, use_cached=use_cached) from collections import Counter all_labels = aggregated['labels'] c", "network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, tf_global_step) variables_to_restore = variable_averages.variables_to_restore( slim.get_model_variables()) variables_to_restore[tf_global_step.op.name]", "primarily used to ' 'evaluate the VGG and ResNet architectures which do not", "label index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert prediction_name == label", "From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name for f in fm.ttflist) output", "top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir =", "i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive])", "auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels,", "get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name =", "from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels)", "= yaml.load(f) _run_saliency_maps(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def confusion_matrix(config_file, use_cached): with open(config_file)", "= cv2.add(paint * (1 - alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l]", "an absolute path to a ' 'checkpoint file.') tf.app.flags.DEFINE_string( 
'eval_dir', '/tmp/tfmodel/', 'Directory where", "arr def pre_process(config, im, coreml=False): model_name = get_model_name(config) return { 'resnet_v2_50': pre_process_resnet, 'mobilenet_v1':", "= 'highest_probability' i_optimal_micro = np.argmax(tpr[key_series] - fpr[key_series]) optimal_threshold_fpr = fpr[key_series][i_optimal_micro] optimal_threshold_tpr = tpr[key_series][i_optimal_micro]", "min(list(im.size)) target_size = tuple(int(resize_ratio * l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR)", "in info.items() if isinstance(v, tf.Tensor) and (not keys or k in keys) }", "aggregated = {} checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for", "coremltools import tfcoreml model_name = get_model_name(config) checkpoint_dir_path = get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' %", "predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs, } def get_monitored_session(checkpoint_path): session_creator = monitored_session.ChiefSessionCreator(", "# cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black') mask = np.zeros_like(confusion_matrix)", "else i for i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir,", "grad_imgs, images, prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config))", "for col in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if file_name:", "file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15, 10)) i = 1", "+ \":0\" coreml_model = coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True if", "= roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) 
roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute micro-average ROC curve", "pb_file_path=frozen_graph_path) logits = results['logits'] index = np.argmax(logits, 1)[0] prediction_name = labels_to_names[index] grad_imgs =", "labels, 'logits': logits, 'probabilities': probabilities, 'predictions': predictions, 'confusion_matrix': confusion_matrix, 'loss': softmax_cross_entropy_loss, 'grad_imgs': grad_imgs,", "'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved') print('plot shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm", "2 + 0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend,", "0) tpr = {} fpr = {} roc_auc = {} for i in", "labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax = plt.subplot() fig, ax", "get_dataset_dir(config) labels_to_names = read_label_file(dataset_dir) plot_confusion_matrix(aggregated['confusion_matrix'], labels_to_names=labels_to_names, save_dir=checkpoint_dir_path) if __name__ == '__main__': define_tf_flags() cli()", "*= target_std x *= 255 x = np.clip(x, 0, 255).astype('uint8') return x def", "not use a background ' 'class for the ImageNet dataset.') tf.app.flags.DEFINE_string( 'model_name', 'mobilenet_v1',", "if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures that we make a", "keys=keys, use_cached=use_cached) from collections import Counter all_labels = aggregated['labels'] c = Counter(all_labels) kv_pairs", "np.array([255, 200, 0]).astype(float) / 255 # orange paint[:, :] *= color roi_img =", "test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :] w, h =", "2.0 (the \"License\"); # you may not use this file 
except in compliance", "to shape [None, 299, 299, 3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def,", "input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[ model_name] + \":0\"", "return config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path", "provider that loads data from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset,", "= list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix =", "tf.summary.scalar(summary_name, value, collections=[]) op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use", "convert_model = True if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias':", "get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) return checkpoint_path def inspect_tfrecords(tfrecords_filename):", "inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator: example =", "a single pass over all of the data. 
num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size))", "config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename = dataset_dir_file('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg') labels_to_names =", "the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used", "micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel()) roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes", "im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if", "pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims to shape [None,", "for name, value in names_to_values.items(): summary_name = 'eval/%s' % name op = tf.summary.scalar(summary_name,", "- alpha) * 255 overlap = roi_img[paint > 128] if overlap.mean() + overlap.std()", "labels_to_names[label] if label_count_map[label] >= 10: continue file_name = '{}/{}{:03d}.jpg'.format( save_dir, '{:02}_{}_{}'.format( label, label_name.encode('utf-8'),", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "print('all_labels unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config,", "defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j in range(n): image = images[j]", "'%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np = pre_process(config,", "get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:,", "im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :,", "os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in range(5): tfrecords_filename", "255 overlap = roi_img[paint > 128] if overlap.mean() + overlap.std() > 128: color", "curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr +=", "# # Unless required by applicable law or agreed to in writing, software", "left as None, then moving averages are not used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval", "# blue else: color = np.array([255, 200, 0]).astype(float) / 255 # orange paint[:,", "num_classes=num_classes, is_training=False) ############################################################## # Create a dataset provider that loads data from the", "express or implied. 
# See the License for the specific language governing permissions", "x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table) plt.figure(figsize=(15,", "canvas = image_np[:, :] w_offset = int((w - l) / 2) h_offset =", "# This ensures that we make a single pass over all of the", "Iterable): aggregated[k].extend(value) else: aggregated[k].append(value) labels = res['labels'] print('len labels', len(labels)) all_labels = aggregated['labels']", "# ax = plt.subplot() fig, ax = plt.subplots() # the size of A4", "return x def plot_image_in_grids(image_list, n_columns, file_name=None): image_table = chunks(image_list, n_columns) n_row = len(image_table)", "def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator: example", "saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels, predictions,", "= im.size[1] / 2 return im.crop( (half_w - w / 2, half_h -", "= labels_to_names[index] print(\"Prediction name:\", prediction_name) index_list = np.argsort(logits) print(\"Top 3 Prediction label index:\",", "file_name, info): import h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for", "click import yaml from collections import Iterable, defaultdict from itertools import cycle import", "= pb_file_path or '%s/frozen_graph.pb' % checkpoint_dir_path with tf.gfile.GFile(pb_file_path) as f: graph_def = tf.GraphDef()", "the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k( logits, labels,", "in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 
'confusion_matrix.png') plt.savefig(pic_path) print(pic_path, 'saved')", "299, 299, 3] image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph)", "either express or implied. # See the License for the specific language governing", "# ###################### dataset = dataset_factory.get_dataset( FLAGS.dataset_name, FLAGS.dataset_split_name, dataset_dir) #################### # Select the model", "print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config) model_name = get_model_name(config)", "if use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return aggregated", "'The decay to use for the moving average.' 'If left as None, then", "example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None, calculate_confusion_matrix=False): dataset_dir = get_dataset_dir(config)", "- alpha), roi_img).astype(int) canvas[w_offset:w_offset + l, h_offset:h_offset + l] = roi_img return canvas", "Matrix') axis = [labels_to_names[i] if labels_to_names else i for i in range(n)] ax.xaxis.set_ticklabels(axis,", "= run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list", "h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k, v in", "f.close() print(info_file_path, 'saved') def load_var(directory, file_name): import h5py info_file_path = os.path.join(directory, file_name) try:", "result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None, coreml_file_path=None): dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path,", "load_var(directory, file_name): import h5py info_file_path = 
os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as", "+= 1 if file_name: plt.savefig(file_name) print(file_name, 'saved') else: print('plot shown') plt.show() def plot_saliency(saliency,", "is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached)", "= auc(fpr[\"micro\"], tpr[\"micro\"]) lw = 2 n_classes = len(possible_labels) # Compute macro-average ROC", "tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset # ###################### dataset = dataset_factory.get_dataset(", "tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print the summaries to screen.", "ax.set_ylabel('True labels') ax.set_title('Confusion Matrix') axis = [labels_to_names[i] if labels_to_names else i for i", "plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('ROC curve') plt.legend(loc=\"lower", "plt.figure(figsize=(15, 10)) i = 1 for row in image_table: for col in row:", "os.path.join(dataset_dir, filename) image_np = PIL.Image.open(filename) logits = run_inference_by_coreml( config, image_np, coreml_file_path=coreml_file_path, ) print('logits',", "def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import seaborn as sns set_matplot_zh_font() # ax = plt.subplot()", "y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities) y_score_matrix = output_matrix y_score_matrix = np.where(", "the License. # You may obtain a copy of the License at #", "u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename", "matplotlib if os.environ.get('DISPLAY', '') == '': print('no display found. 
Using non-interactive Agg backend')", "def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys = [ 'labels', 'images', 'grad_imgs', ]", "micro-average ROC curve and ROC area fpr[\"micro\"], tpr[\"micro\"], micro_thresholds = roc_curve( y_binary.ravel(), y_score_matrix.ravel())", "prediction_name == label def dataset_dir_file(config, filename): filename = os.path.join(get_dataset_dir(config), filename) return filename def", "_run_inference_by_graph_def(config, graph_def, image_np, enable_saliency_maps=False): model_name = get_model_name(config) image_size = 224 image_np = pre_process(config,", "positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all", "import division from __future__ import print_function import click import yaml from collections import", "input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor", "# get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map", "label_count_map = defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j in range(n): image", "op = tf.Print(op, [value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches:", "tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return get_config_value(config,", "res = sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k in res.keys():", "255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label]", "'r') as f: return { k: f[k][:] for k in f.keys() } except", "if plot_all_classes: for i, color in zip(range(n_classes), colors): label 
= 'ROC curve of", "('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'), ] for filename, label in filenames: filename =", "'loss', 'grad_imgs', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) from collections import Counter all_labels", "} def define_tf_flags(): BATCH_SIZE = 100 tf.app.flags.DEFINE_integer( 'batch_size', BATCH_SIZE, 'The number of samples", "np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee',", "preprocessing_factory.get_preprocessing( preprocessing_name, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size image = image_preprocessing_fn(image, eval_image_size, eval_image_size)", "set_matplot_zh_font() # ax = plt.subplot() fig, ax = plt.subplots() # the size of", "cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues') # cmap.set_bad(color='black')", "label in filenames: filename = dataset_dir_file(config, filename) # image_np = cv2.imread(filename) result =", "= pre_process(config, image_np) image_np = cv2.resize(image_np, (image_size, image_size)) # expand dims to shape", "= tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0) grad_imgs = tf.gradients(softmax_cross_entropy_loss, images)[0] return { 'labels_to_names':", "print('batch #{} of {}'.format(i, num_batches)) params = { k: v for k, v", "monitored_session from datasets.plants import read_label_file from datasets import dataset_factory from nets import nets_factory", "def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename", "confusion_matrix = None # Print the summaries to screen. 
for name, value in", "curve of class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i],", "= np.zeros_like(confusion_matrix) mask[confusion_matrix == 0] = True # sns.set(font_scale=1) with sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2,", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "Image import cv2 import numpy as np import tensorflow as tf from tensorflow.python.training", "print(\"Top 3 Prediction label index:\", index_list, ' '.join([labels_to_names[i] for i in list(index_list)])) assert", "confusion_matrix = tf.confusion_matrix(labels=labels, num_classes=num_classes, predictions=predictions) else: confusion_matrix = None # Print the summaries", "resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size = tuple(int(resize_ratio * l) for", "is not None else prefix, label_count_map[label]) saliency = deprocess_image(grad_img, target_std=0.3) restored_image = ((image", "test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config) frozen_graph_path = os.path.join(checkpoint_dir, 'frozen_graph.pb') filename =", "'label']) label -= FLAGS.labels_offset raw_images = image ##################################### # Select the preprocessing function", "config.get(key) or getattr(FLAGS, key) def get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path =", "aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images, prefix, labels=aggregated['labels']) def", "(area = {0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--',", "= tf.contrib.slim _R_MEAN = 123.68 _G_MEAN = 116.78 _B_MEAN = 103.94 OUTPUT_MODEL_NODE_NAMES_DICT =", "class {0} (area = {1:0.2f})'.format( i, roc_auc[i]) label = None plt.plot(fpr[i], 
tpr[i], color=color,", "None, 'The directory where the model was written to or an absolute path", "tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"] = auc(fpr[\"highest_probability\"], tpr[\"highest_probability\"]) # Compute", "/ 2 return im.crop( (half_w - w / 2, half_h - h /", "tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output tensor tensor_map = { 'logits':", "set(f.split(',', 1)[0] for f in output.split('\\n')) available = list(mat_fonts & zh_fonts) return available", "Prediction label index:\", ' '.join(result['top_n_names'])) assert prediction_name == label def dataset_dir_file(config, filename): filename", "use_cached=use_cached) grad_imgs = aggregated['grad_imgs'] images = aggregated['images'] prefix = '' save_saliency_maps(config, grad_imgs, images,", "labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss = tf.losses.softmax_cross_entropy( one_hot_predictions, logits, label_smoothing=0.0, weights=1.0)", "the model # #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages = tf.train.ExponentialMovingAverage(", "central_crop(im, w, h): half_w = im.size[0] / 2 half_h = im.size[1] / 2", "checkpoint_path = checkpoint_path or get_lastest_check_point(config) with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))):", "tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series =", "PIL.Image.open(filename) logits = run_inference_by_pb(config, image_np, pb_file_path=pb_file_path)[ 'logits'] index = np.argmax(logits, 1) prediction_name =", "ROC curve and ROC area # First aggregate all false positive rates all_fpr", "'labels_to_names': labels_to_names, 'checkpoint_path': checkpoint_path, 'num_batches': num_batches, 
'names_to_values': names_to_values, 'names_to_updates': names_to_updates, 'variables_to_restore': variables_to_restore, 'images':", "dataset provider that loads data from the dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider(", "# Compute macro-average ROC curve and ROC area # First aggregate all false", "_run_info(config, use_cached=use_cached) @cli.command() @click.argument('config_file') def test_models(config_file): with open(config_file) as f: config = yaml.load(f)", "get_checkpoint_dir_path(config): return get_config_value(config, 'checkpoint_path') def get_lastest_check_point(config): checkpoint_path = get_checkpoint_dir_path(config) if tf.gfile.IsDirectory(checkpoint_path): checkpoint_path =", "the batches.') tf.app.flags.DEFINE_string( 'dataset_name', 'plants', 'The name of the dataset to load.') tf.app.flags.DEFINE_string(", "curves plt.figure() colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) if plot_all_classes: for i, color in", "image_np = np.expand_dims(image_np, 0) graph = tf.import_graph_def(graph_def, name='') with tf.Session(graph=graph) as sess: input_tensor_name", "l] = roi_img return canvas def test_frozen_graph_saliency_map(config): checkpoint_dir = config['checkpoint_path'] dataset_dir = get_dataset_dir(config)", "arr = np.asarray(im2).astype(np.float32) if not coreml: arr[:, :, 0] -= _R_MEAN arr[:, :,", "0, 'An offset for the labels in the dataset. 
This flag is primarily", "= defaultdict(int) try: os.makedirs(save_dir) except OSError: pass for j in range(n): image =", "= y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds = roc_curve( y_binary.ravel()[i_positive], y_score_matrix_ravel[i_positive]) roc_auc[\"highest_probability\"]", "# plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive", "fig.set_size_inches(18, 15) # https://stackoverflow.com/questions/22548813/python-color-map-but-with-all-zero-values-mapped-to-black # confusion_matrix = np.ma.masked_where(confusion_matrix < 0.01, # confusion_matrix) cmap", "'logits': result['logits'], 'grad_imgs': result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir", "to evaluate.') tf.app.flags.DEFINE_string( 'preprocessing_name', None, 'The name of the preprocessing to use. If", "variables_to_restore = slim.get_variables_to_restore() predictions = tf.argmax(logits, 1) one_hot_predictions = slim.one_hot_encoding( predictions, dataset.num_classes -", "where the results are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads", "num_batches)) params = { k: v for k, v in info.items() if isinstance(v,", "file_name): import h5py info_file_path = os.path.join(directory, file_name) try: with h5py.File(info_file_path, 'r') as f:", "dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), # ('20180330/4PdXwYcGt/4PdXwYcGt_5.jpg', u'酢漿草'),", "Reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "'variables_to_restore': variables_to_restore, 'images': images, 'raw_images': raw_images, 'network_fn': network_fn, 'labels': labels, 'logits': logits, 'probabilities':", "shown') plt.show() def get_matplot_zh_font(): # From https://blog.csdn.net/kesalin/article/details/71214038 fm = FontManager() mat_fonts = set(f.name", "sess.graph.get_tensor_by_name( input_tensor_name) # get input tensor output_tensor = sess.graph.get_tensor_by_name( output_tensor_name) # get output", "= image ##################################### # Select the preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name", "in output.split('\\n')) available = list(mat_fonts & zh_fonts) return available def set_matplot_zh_font(): available =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "y_score_matrix_ravel = y_score_matrix.ravel() i_positive = y_score_matrix_ravel != 0 fpr[\"highest_probability\"], tpr[ \"highest_probability\"], micro_thresholds =", "unique length', len(set(all_labels))) if use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False):", "std: x /= std x *= target_std x *= 255 x = np.clip(x,", "def save_var(directory, file_name, info): import h5py info_file_path = os.path.join(directory, file_name) f = h5py.File(info_file_path,", "found. 
Using non-interactive Agg backend') matplotlib.use('Agg') import matplotlib.pyplot as plt slim = tf.contrib.slim", "output_tensor_name.replace('/', '__').replace(':', '__')].flatten() return probs def run_inference_on_file_pb(config, filename, pb_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config))", "'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list = info['logits'] labels = info['labels']", "samples in each batch.') tf.app.flags.DEFINE_integer( 'max_num_batches', None, 'Max number of batches to evaluate", "use_cached: save_var(checkpoint_dir_path, 'run_info_result.h5', aggregated) return aggregated def _run_saliency_maps(config, use_cached=False): checkpoint_path = get_lastest_check_point(config) keys", "in zip(range(n_classes), colors): label = 'ROC curve of class {0} (area = {1:0.2f})'.format(", "grad_img = grad_imgs[j] label = labels[j] label_name = labels_to_names[label] if label_count_map[label] >= 10:", "/= 255.0 arr -= 0.5 arr *= 2.0 return arr def pre_process(config, im,", "3 alpha = np.clip(1 - intensify_factor * saliency.astype(float) / 255, 0, 1) paint", "inspect_datasets(config): dataset_dir = get_dataset_dir(config) examples = [] for i in range(5): tfrecords_filename =", "dataset # ############################################################## provider = slim.dataset_data_provider.DatasetDataProvider( dataset, num_epochs=1, # 每張只讀一次 # num_readers=1, shuffle=False,", "save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing import label_binarize possible_labels = list(range(max(labels)", "available = get_matplot_zh_font() if len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus']", "test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def 
get_image_with_saliency_map(image_np, saliency): image_np = np.copy(np.asarray(image_np))[:, :]", "right\") pic_path = os.path.join(save_dir, 'roc_curve.png') plt.savefig(pic_path) print(pic_path, 'saved') print('ROC curve shown') plt.show() def", "224 im1 = resize(im, target_smallest_size) im2 = central_crop(im1, target_smallest_size, target_smallest_size) arr = np.asarray(im2).astype(np.float32)", "tuple(int(resize_ratio * l) for l in im.size) return im.resize(target_size, PIL.Image.BILINEAR) def central_crop(im, w,", "n): \"\"\"Yield successive n-sized chunks from l.\"\"\" return [l[i:i + n] for i", "i = 1 for row in image_table: for col in row: plt.subplot(n_row, n_columns,", "= min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas", "[value], summary_name) tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) # TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches", "= cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset = int((w - l) /", "tf from tensorflow.python.training import monitored_session from datasets.plants import read_label_file from datasets import dataset_factory", "with get_monitored_session(checkpoint_path) as sess: for i in range(int(math.ceil(num_batches))): print('batch #{} of {}'.format(i, num_batches))", "tf.Session(graph=graph) as sess: input_tensor_name = \"input:0\" # output_tensor_name = \"resnet_v2_50/predictions/Reshape_1:0\" output_tensor_name = OUTPUT_MODEL_NODE_NAMES_DICT[", "'prediction_label': index[0], 'top_n_names': top_n_names, 'logits': logits.tolist(), } return result def test_inference_by_model_files(config, dataset_dir=None, frozen_graph_path=None,", "@cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def plot_roc(config_file, use_cached): with open(config_file) as f: config =", "else: aggregated[k] += np.matrix(value) else: if k not in aggregated: aggregated[k] = []", "= 
os.path.join(directory, file_name) f = h5py.File(info_file_path, 'w') for k, v in info.items(): f[k]", "in row: plt.subplot(n_row, n_columns, i) plt.imshow(col) i += 1 if file_name: plt.savefig(file_name) print(file_name,", "print(len(examples)) def resize(im, target_smallest_size): resize_ratio = 1.0 * target_smallest_size / min(list(im.size)) target_size =", "plot_roc(config_file, use_cached): with open(config_file) as f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file')", "0.5) * 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2,", "mat_fonts = set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f \"%{family}\\n\"',", "None: return aggregated calculate_confusion_matrix = True info = get_info(config, calculate_confusion_matrix=calculate_confusion_matrix) num_batches = info['num_batches']", "use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys = [ 'logits', 'labels', 'predictions', 'probabilities', ] info", "plt.get_cmap('Accent') # cmap = plt.get_cmap('coolwarm') # cmap = plt.get_cmap('plasma') # cmap = plt.get_cmap('Blues')", "-_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN, }, 'mobilenet_v1': { 'red_bias': -1.0, 'green_bias': -1.0, 'blue_bias':", "of batches to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master', '', 'The address", "and ResNet architectures which do not use a background ' 'class for the", "use_cached: aggregated = load_var(checkpoint_dir_path, 'run_info_result.h5') if aggregated is not None: return aggregated calculate_confusion_matrix", "@click.argument('config_file') def test_models(config_file): with open(config_file) as f: config = yaml.load(f) test_inference_by_model_files(config) @cli.command() @click.argument('config_file')", "'resnet_v2_50': pre_process_resnet, 'mobilenet_v1': 
pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return", "checkpoint_path def inspect_tfrecords(tfrecords_filename): record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename) examples = [] for string_record in record_iterator:", "= 1 for row in image_table: for col in row: plt.subplot(n_row, n_columns, i)", "used.') tf.app.flags.DEFINE_integer( 'eval_image_size', None, 'Eval image size') FLAGS = tf.app.flags.FLAGS def get_dataset_dir(config): return", "= output_matrix y_score_matrix = np.where( y_score_matrix == np.max(y_score_matrix, axis=1)[:, None], y_score_matrix, 0) tpr", "or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names = read_label_file(dataset_dir) probabilities = tf.nn.softmax(logits) softmax_cross_entropy_loss", "except in compliance with the License. # You may obtain a copy of", "= read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames = [ ('20180330/1lZsRrQzj/1lZsRrQzj_5.jpg', u'通泉草'), ('20180330/iUTbDxEoT/iUTbDxEoT_0.jpg', u'杜鵑花仙子'), #", "that evaluates a model using a given dataset.\"\"\" from __future__ import absolute_import from", "overlap = roi_img[paint > 128] if overlap.mean() + overlap.std() > 128: color =", "/ 255, 0, 1) paint = np.copy(1 - alpha) * 255 overlap =", "label_binarize possible_labels = list(range(max(labels) + 1)) y_binary = label_binarize(labels, classes=possible_labels) output_matrix = np.array(probabilities)", "= get_checkpoint_dir_path(config) frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' %", "2016 The TensorFlow Authors. All Rights Reserved. 
# # Licensed under the Apache", "FontManager() mat_fonts = set(f.name for f in fm.ttflist) output = subprocess.check_output('fc-list :lang=zh-tw -f", "frozen_model_file = '%s/frozen_graph.pb' % checkpoint_dir_path coreml_model_file = coreml_file_path or '%s/plant.mlmodel' % checkpoint_dir_path image_np", "ax=ax, cmap=cmap) n = confusion_matrix.shape[0] # labels, title and ticks ax.set_xlabel('Predicted labels') ax.set_ylabel('True", "* 255).astype('uint8') blend = get_image_with_saliency_map(restored_image, saliency) plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name)", "'dataset_dir', None, 'The directory where the dataset files are stored.') tf.app.flags.DEFINE_integer( 'labels_offset', 0,", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "{0:0.2f})' ''.format(roc_auc[\"highest_probability\"]), color='blue', linestyle=':', linewidth=4) # plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0,", "if not coreml: arr /= 255.0 arr -= 0.5 arr *= 2.0 return", "allow_smaller_final_batch=True, capacity=5 * FLAGS.batch_size) #################### # Define the model # #################### logits, _", "f: config = yaml.load(f) _roc_analysis(config, use_cached=use_cached) @cli.command() @click.argument('config_file') @click.option('--use_cached', is_flag=True) def saliency_maps(config_file, use_cached):", "saliency.astype(float) / 255, 0, 1) paint = np.copy(1 - alpha) * 255 overlap", "rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC", "= cv2.resize(saliency, (l, l)) saliency = cv2.cvtColor(saliency, cv2.COLOR_GRAY2RGB) canvas = image_np[:, :] w_offset", "i in range(n)] ax.xaxis.set_ticklabels(axis, rotation=270) ax.yaxis.set_ticklabels(axis, rotation=0) pic_path = os.path.join(save_dir, 'confusion_matrix.png') plt.savefig(pic_path) print(pic_path,", "'Directory where the results 
are saved to.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of", "in range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_train_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) def resize(im, target_smallest_size): resize_ratio", "None, 'Max number of batches to evaluate by default use all.') tf.app.flags.DEFINE_string( 'master',", "preprocessing function # ##################################### preprocessing_name = FLAGS.preprocessing_name or model_name image_preprocessing_fn = preprocessing_factory.get_preprocessing( preprocessing_name,", "# master=master, # config=config ) return monitored_session.MonitoredSession( session_creator=session_creator) def plot_confusion_matrix(confusion_matrix, labels_to_names=None, save_dir='.'): import", "coremltools.models.MLModel(coreml_model_file) convert_model = False # convert_model = True if convert_model: extra_args = {", "dataset.num_classes - FLAGS.labels_offset) labels = tf.squeeze(labels) # Define the metrics: names_to_values, names_to_updates =", "record_iterator: example = tf.train.Example() example.ParseFromString(string_record) examples.append(example) # print(example) return examples def get_info(config, checkpoint_path=None,", "all_fpr tpr[\"macro\"] = mean_tpr roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"]) # key_series = 'micro' key_series", "= im1.resize((target_smallest_size, target_smallest_size), PIL.Image.BILINEAR) arr = np.asarray(im2).astype(np.float32) if not coreml: arr /= 255.0", "range(5): tfrecords_filename = os.path.join( dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i", "plt.figure(figsize=(15, 10)) plot_image_in_grids([ [saliency, image] ], file_name) def _eval_tensors(config, checkpoint_path=None, keys=None, use_cached=False): checkpoint_dir_path", "{} res = 
sess.run(params, feed_dict=feed_dict) except: import traceback traceback.print_exc() raise for k in", "dataset_dir = dataset_dir or get_dataset_dir(config) test_inference_by_pb(config, pb_file_path=frozen_graph_path, dataset_dir=dataset_dir) test_inference_by_coreml(config, coreml_file_path=coreml_file_path, dataset_dir=dataset_dir) def get_image_with_saliency_map(image_np,", "/ float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s' % checkpoint_path) labels_to_names =", "data. num_batches = math.ceil(dataset.num_samples / float(FLAGS.batch_size)) checkpoint_path = checkpoint_path or get_lastest_check_point(config) tf.logging.info('Evaluating %s'", "half_h + h / 2)) def pre_process_resnet(im, coreml=False): target_smallest_size = 224 im1 =", "= np.argsort(logits, 1) top_n_names = list(reversed( [labels_to_names[i] for i in list(index_list[0])])) print('logits', logits)", "paint = np.copy(1 - alpha) * 255 overlap = roi_img[paint > 128] if", "plt.subplot() fig, ax = plt.subplots() # the size of A4 paper fig.set_size_inches(18, 15)", "curve and ROC area # First aggregate all false positive rates all_fpr =", "_plot_roc(logits_list, labels, predictions, probabilities, plot_all_classes=False, save_dir=None): from sklearn.metrics import roc_curve, auc from sklearn.preprocessing", "FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures that we make a single", "* central_fraction) def pre_process_mobilenet(im, coreml=False): # 參考 https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/inception_preprocessing.py # 裡的 preprocess_for_eval im1 =", "result.get('grad_imgs'), } def test_inference_by_coreml(config, coreml_file_path=None, dataset_dir=None): labels_to_names = read_label_file(get_dataset_dir(config)) dataset_dir = get_dataset_dir(config) filenames", "[ 'confusion_matrix', ] aggregated = _eval_tensors(config, keys=keys, use_cached=use_cached) 
checkpoint_dir_path = get_checkpoint_dir_path(config) dataset_dir =", "pre_process_mobilenet, }[model_name](im, coreml=coreml) def get_model_name(config): model_name = get_config_value(config, 'model_name') return model_name def test_inference_by_pb(config,", "= [ 'logits', 'labels', 'predictions', 'probabilities', ] info = _eval_tensors(config, keys=keys, use_cached=use_cached) logits_list", "v for k, v in info.items() if isinstance(v, tf.Tensor) and (not keys or", "plot_image_in_grids([ saliency, restored_image, blend, ], n_columns=2, file_name=file_name) label_count_map[label] += 1 def _plot_roc(logits_list, labels,", "dataset_dir, 'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in range(5): tfrecords_filename =", "h = image_np.shape[0:2] l = min(w, h) saliency = cv2.resize(saliency, (l, l)) saliency", "Define the model # #################### logits, _ = network_fn(images) if FLAGS.moving_average_decay: variable_averages =", "tf.logging.set_verbosity(tf.logging.INFO) tf.Graph().as_default() tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset # ###################### dataset", "def save_saliency_maps(config, grad_imgs, images, prefix='', labels=None): n = images.shape[0] save_dir = 'saliency_maps' labels_to_names", "# 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x, target_std=0.15): # normalize tensor x =", "else: color = np.array([255, 200, 0]).astype(float) / 255 # orange paint[:, :] *=", "print(pic_path, 'saved') print('ROC curve shown') plt.show() def _roc_analysis(config, use_cached=False): checkpoint_dir_path = get_checkpoint_dir_path(config) keys", "len(available) > 0: plt.rcParams['font.sans-serif'] = [available[0]] # 指定默认字体 plt.rcParams['axes.unicode_minus'] = False def deprocess_image(x,", "tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 4, 'The number of threads used to 
create the batches.') tf.app.flags.DEFINE_string(", "'plants_validation_{:05d}-of-00005.tfrecord'.format(i)) examples.extend(inspect_tfrecords(tfrecords_filename)) print(len(examples)) examples = [] for i in range(5): tfrecords_filename = os.path.join(", "TODO(sguada) use num_epochs=1 if FLAGS.max_num_batches: num_batches = FLAGS.max_num_batches else: # This ensures that", "name of the dataset to load.') tf.app.flags.DEFINE_string( 'dataset_split_name', 'validation', 'The name of the", "'logits'] index = np.argmax(logits, 1) prediction_name = labels_to_names[index[0]] index_list = np.argsort(logits, 1) top_n_names", "= images.shape[0] save_dir = 'saliency_maps' labels_to_names = read_label_file(get_dataset_dir(config)) label_count_map = defaultdict(int) try: os.makedirs(save_dir)", "sns.axes_style('darkgrid'): sns.heatmap(confusion_matrix, linewidths=0.2, linecolor='#eeeeee', xticklabels=True, yticklabels=True, mask=mask, annot=False, ax=ax, cmap=cmap) n = confusion_matrix.shape[0]", "if convert_model: extra_args = { 'resnet_v2_50': { 'red_bias': -_R_MEAN, 'green_bias': -_G_MEAN, 'blue_bias': -_B_MEAN," ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "KIND, either express or implied. # See the License for the specific language", "metrics Arguments: labels: tf.Tensor objects, True values of the dependent variable predictions: tf.Tensor", "Unless required by applicable law or agreed to in writing, software # distributed", "def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True", "objects, Predictions from the model Returns: The mean per class accuracy \"\"\" return", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "tf.Tensor objects, Predictions from the model Returns: The mean per class accuracy \"\"\"", "labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared error metric for", "predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared error metric for regression", "model training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class", "License. 
# You may obtain a copy of the License at # #", "model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute", "tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error for regression model.", "dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Mean absolute error", "model Returns: The mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'],", "tasks. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects,", "objects, Predictions from the model Returns: The AUC metric for the model \"\"\"", "the model Returns: The AUC metric for the model \"\"\" return {'auc_ci': tf.metrics.auc(", "mean squared error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) }", "\"\"\"Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True values of dependent", "law or agreed to in writing, software # distributed under the License is", "values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Mean", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "def mar(labels, predictions): \"\"\"Mean absolute error for regression model. Arguments: labels: tf.Tensor objects,", "accuracy metrics Arguments: labels: tf.Tensor objects, True values of the dependent variable predictions:", "import tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics", "absolute error for regression model. 
Arguments: labels: tf.Tensor objects, True values of dependent", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "Predictions from the model Returns: The AUC metric for the model \"\"\" return", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The", "in tensorflow for model training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions, num_classes):", "predictions: tf.Tensor objects, Predictions from the model Returns: The mean per class accuracy", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "creating metrics in tensorflow for model training.\"\"\" import tensorflow as tf def mean_acc(labels,", "using interpolation. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor", "my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True values", "values of the dependent variable predictions: tf.Tensor objects, Predictions from the model Returns:", "objects, Predictions from the model Returns: Mean absolute error for the regression model", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "model Returns: The AUC metric for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels,", "predictions): \"\"\"Mean absolute error for regression model. 
Arguments: labels: tf.Tensor objects, True values", "labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from", "{'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared error", "tf.Tensor objects, Predictions from the model Returns: Root mean squared error for regression", "Root mean squared error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions'])", "num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor objects, True values of", "\"\"\"Root mean squared error metric for regression tasks. Arguments: labels: tf.Tensor objects, True", "in compliance with the License. # You may obtain a copy of the", "dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Root mean squared", "values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Root", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "for creating metrics in tensorflow for model training.\"\"\" import tensorflow as tf def", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "from the model Returns: Root mean squared error for regression model \"\"\" return", "use this file except in compliance with the License. # You may obtain", "of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Root mean", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0", "under the License. \"\"\"Module for creating metrics in tensorflow for model training.\"\"\" import", "predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. Arguments: labels:", "per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels,", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor objects,", "AUC metric for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') }", "License, Version 2.0 (the \"License\"); # you may not use this file except", "{'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error for regression", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "Predictions from the model Returns: Mean absolute error for the regression model \"\"\"", "return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric", "return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error for", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "License. 
\"\"\"Module for creating metrics in tensorflow for model training.\"\"\" import tensorflow as", "objects, True values of the dependent variable predictions: tf.Tensor objects, Predictions from the", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "metrics in tensorflow for model training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions,", "the specific language governing permissions and # limitations under the License. \"\"\"Module for", "predictions): \"\"\"Root mean squared error metric for regression tasks. Arguments: labels: tf.Tensor objects,", "Arguments: labels: tf.Tensor objects, True values of the dependent variable predictions: tf.Tensor objects,", "OF ANY KIND, either express or implied. # See the License for the", "for regression model. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions:", "2.0 (the \"License\"); # you may not use this file except in compliance", "training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy", "return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared", "the License. \"\"\"Module for creating metrics in tensorflow for model training.\"\"\" import tensorflow", "# you may not use this file except in compliance with the License.", "squared error metric for regression tasks. 
Arguments: labels: tf.Tensor objects, True values of", "as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels:", "tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from the", "regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean", "agreed to in writing, software # distributed under the License is distributed on", "of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The AUC", "The AUC metric for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation')", "True values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns:", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error for regression model. Arguments:", "Returns: The mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes)", "(the \"License\"); # you may not use this file except in compliance with", "tensorflow for model training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean", "limitations under the License. 
\"\"\"Module for creating metrics in tensorflow for model training.\"\"\"", "of the dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The", "# # Unless required by applicable law or agreed to in writing, software", "from the model Returns: Mean absolute error for the regression model \"\"\" return", "\"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean", "express or implied. # See the License for the specific language governing permissions", "predictions): \"\"\"Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True values of", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "Returns: Mean absolute error for the regression model \"\"\" return {'mean_absolute_error': tf.metrics.mean_absolute_error( labels,", "except in compliance with the License. # You may obtain a copy of", "} def mar(labels, predictions): \"\"\"Mean absolute error for regression model. Arguments: labels: tf.Tensor", "by applicable law or agreed to in writing, software # distributed under the", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "Returns: The AUC metric for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'],", "} def rmse(labels, predictions): \"\"\"Root mean squared error metric for regression tasks. Arguments:", "for regression tasks. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions:", "mar(labels, predictions): \"\"\"Mean absolute error for regression model. 
Arguments: labels: tf.Tensor objects, True", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "objects, Predictions from the model Returns: Root mean squared error for regression model", "for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions):", "\"\"\"Mean absolute error for regression model. Arguments: labels: tf.Tensor objects, True values of", "def rmse(labels, predictions): \"\"\"Root mean squared error metric for regression tasks. Arguments: labels:", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "metric for regression tasks. Arguments: labels: tf.Tensor objects, True values of dependent variable", "metric for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def", "\"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor objects, True values of the", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "error for regression model. Arguments: labels: tf.Tensor objects, True values of dependent variable", "# limitations under the License. 
\"\"\"Module for creating metrics in tensorflow for model", "model Returns: Root mean squared error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error(", "Returns: Root mean squared error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels,", "# Copyright 2019 Google LLC # # Licensed under the Apache License, Version", "file except in compliance with the License. # You may obtain a copy", "squared error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def", "\"\"\"Module for creating metrics in tensorflow for model training.\"\"\" import tensorflow as tf", "predictions: tf.Tensor objects, Predictions from the model Returns: Mean absolute error for the", "absolute error for the regression model \"\"\" return {'mean_absolute_error': tf.metrics.mean_absolute_error( labels, predictions['predictions']) }", "from the model Returns: The mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy(", "model Returns: Mean absolute error for the regression model \"\"\" return {'mean_absolute_error': tf.metrics.mean_absolute_error(", "labels: tf.Tensor objects, True values of the dependent variable predictions: tf.Tensor objects, Predictions", "error metric for regression tasks. 
Arguments: labels: tf.Tensor objects, True values of dependent", "tf.Tensor objects, Predictions from the model Returns: Mean absolute error for the regression", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "variable predictions: tf.Tensor objects, Predictions from the model Returns: The AUC metric for", "License for the specific language governing permissions and # limitations under the License.", "Mean absolute error for the regression model \"\"\" return {'mean_absolute_error': tf.metrics.mean_absolute_error( labels, predictions['predictions'])", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the License. # You may obtain a copy of the License at #", "language governing permissions and # limitations under the License. \"\"\"Module for creating metrics", "\"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error", "mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "{'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric using", "tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation.", "tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments:", "labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. 
Arguments:", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor objects, True", "predictions['predictions']) } def mar(labels, predictions): \"\"\"Mean absolute error for regression model. Arguments: labels:", "\"License\"); # you may not use this file except in compliance with the", "error for regression model \"\"\" return {'root_mean_square_error': tf.metrics.root_mean_squared_error( labels, predictions['predictions']) } def mar(labels,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "regression model. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "for the specific language governing permissions and # limitations under the License. \"\"\"Module", "predictions: tf.Tensor objects, Predictions from the model Returns: Root mean squared error for", "variable predictions: tf.Tensor objects, Predictions from the model Returns: Root mean squared error", "The mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) }", "tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor", "dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The AUC metric", "specific language governing permissions and # limitations under the License. 
\"\"\"Module for creating", "applicable law or agreed to in writing, software # distributed under the License", "per class accuracy metrics Arguments: labels: tf.Tensor objects, True values of the dependent", "class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions):", "of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Mean absolute", "governing permissions and # limitations under the License. \"\"\"Module for creating metrics in", "Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions", "Predictions from the model Returns: Root mean squared error for regression model \"\"\"", "\"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels, predictions['class_ids'], num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC", "predictions: tf.Tensor objects, Predictions from the model Returns: The AUC metric for the", "for model training.\"\"\" import tensorflow as tf def mean_acc(labels, predictions, num_classes): \"\"\"Mean per", "model. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects,", "and # limitations under the License. \"\"\"Module for creating metrics in tensorflow for", "} def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects,", "the dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The mean", "interpolation. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects,", "True values of the dependent variable predictions: tf.Tensor objects, Predictions from the model", "metric using interpolation. 
Arguments: labels: tf.Tensor objects, True values of dependent variable predictions:", "variable predictions: tf.Tensor objects, Predictions from the model Returns: The mean per class", "or agreed to in writing, software # distributed under the License is distributed", "for the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels,", "rmse(labels, predictions): \"\"\"Root mean squared error metric for regression tasks. Arguments: labels: tf.Tensor", "or implied. # See the License for the specific language governing permissions and", "from the model Returns: The AUC metric for the model \"\"\" return {'auc_ci':", "the model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions):", "predictions, num_classes): \"\"\"Mean per class accuracy metrics Arguments: labels: tf.Tensor objects, True values", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "the model Returns: Mean absolute error for the regression model \"\"\" return {'mean_absolute_error':", "Predictions from the model Returns: The mean per class accuracy \"\"\" return {'mean_class_acc':", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "permissions and # limitations under the License. \"\"\"Module for creating metrics in tensorflow", "mean squared error metric for regression tasks. Arguments: labels: tf.Tensor objects, True values", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "the model Returns: The mean per class accuracy \"\"\" return {'mean_class_acc': tf.metrics.mean_per_class_accuracy( labels,", "dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The mean per", "AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True values of dependent variable", "regression tasks. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor", "tf.Tensor objects, True values of the dependent variable predictions: tf.Tensor objects, Predictions from", "model \"\"\" return {'auc_ci': tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root", "the model Returns: Root mean squared error for regression model \"\"\" return {'root_mean_square_error':", "with the License. # You may obtain a copy of the License at", "<gh_stars>1000+ # Copyright 2019 Google LLC # # Licensed under the Apache License,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "num_classes) } def my_auc(labels, predictions): \"\"\"Custom AUC metric using interpolation. 
Arguments: labels: tf.Tensor", "in writing, software # distributed under the License is distributed on an \"AS", "class accuracy metrics Arguments: labels: tf.Tensor objects, True values of the dependent variable", "tf.metrics.auc( labels, predictions['class_ids'], summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared error metric", "tf.Tensor objects, Predictions from the model Returns: The AUC metric for the model", "summation_method='careful_interpolation') } def rmse(labels, predictions): \"\"\"Root mean squared error metric for regression tasks.", "objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from the model", "variable predictions: tf.Tensor objects, Predictions from the model Returns: Mean absolute error for", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "in events: if start not in reserved: reserved.add(start) else: for day in range(start,", "= set() for start, end in events: if start not in reserved: reserved.add(start)", "[3, 4], [1, 2]], [[1, 4], [4, 4], [2, 2], [3, 4], [1,", "range(start, end+1): if day not in reserved: reserved.add(day) break return len(reserved) if __name__", "start not in reserved: reserved.add(start) else: for day in range(start, end+1): if day", "(x[1] - x[0], x[0], x[1])) reserved = set() for start, end in events:", "'__main__': events = [ [[1, 2], [2, 3], [3, 4]], [[1, 2], [2,", "2], [2, 3], [3, 4]], [[1, 2], [2, 3], [3, 4], [1, 2]],", "100000]], [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6],", "3], [1, 4], [1, 5], [1, 6], [1, 7]], ] for event in", "reserved: reserved.add(day) break return len(reserved) if __name__ == '__main__': events = [ [[1,", "in reserved: reserved.add(start) else: for day in range(start, end+1): if day not in", "3], [3, 4]], [[1, 2], [2, 3], [3, 4], [1, 2]], [[1, 4],", "end in events: if start not in reserved: reserved.add(start) else: for day in", "__name__ == '__main__': events = [ [[1, 2], [2, 3], [3, 4]], [[1,", "x: (x[1] - x[0], x[0], x[1])) reserved = set() for start, end in", "= [ [[1, 2], [2, 3], [3, 4]], [[1, 2], [2, 3], [3,", "in reserved: reserved.add(day) break return len(reserved) if __name__ == '__main__': events = [", "len(reserved) if __name__ == '__main__': events = [ [[1, 2], [2, 3], [3,", "in range(start, end+1): if day not in reserved: reserved.add(day) break return len(reserved) if", "[ [[1, 2], [2, 3], [3, 4]], [[1, 2], [2, 3], [3, 4],", "-> int: events = sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1])) reserved", "events = [ [[1, 2], [2, 3], [3, 4]], [[1, 2], [2, 3],", "[1, 4], [1, 5], [1, 6], [1, 7]], ] for event in events:", "2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7]], ] for", "for day in range(start, end+1): if day not in reserved: reserved.add(day) break return", "if __name__ == '__main__': events = [ [[1, 2], [2, 3], [3, 
4]],", "4], [1, 5], [1, 6], [1, 7]], ] for event in events: print(maxEvents(event))", "int: events = sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1])) reserved =", "4], [1, 2]], [[1, 4], [4, 4], [2, 2], [3, 4], [1, 1]],", "[4, 4], [2, 2], [3, 4], [1, 1]], [[1, 100000]], [[1, 1], [1,", "4], [4, 4], [2, 2], [3, 4], [1, 1]], [[1, 100000]], [[1, 1],", "[[1, 2], [2, 3], [3, 4]], [[1, 2], [2, 3], [3, 4], [1,", "[1, 3], [1, 4], [1, 5], [1, 6], [1, 7]], ] for event", "break return len(reserved) if __name__ == '__main__': events = [ [[1, 2], [2,", "maxEvents(events: list[list[int, int]]) -> int: events = sorted(events, key=lambda x: (x[1] - x[0],", "4], [2, 2], [3, 4], [1, 1]], [[1, 100000]], [[1, 1], [1, 2],", "[2, 2], [3, 4], [1, 1]], [[1, 100000]], [[1, 1], [1, 2], [1,", "def maxEvents(events: list[list[int, int]]) -> int: events = sorted(events, key=lambda x: (x[1] -", "[1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7]], ]", "if day not in reserved: reserved.add(day) break return len(reserved) if __name__ == '__main__':", "events = sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1])) reserved = set()", "x[0], x[0], x[1])) reserved = set() for start, end in events: if start", "4], [1, 1]], [[1, 100000]], [[1, 1], [1, 2], [1, 3], [1, 4],", "3], [3, 4], [1, 2]], [[1, 4], [4, 4], [2, 2], [3, 4],", "day not in reserved: reserved.add(day) break return len(reserved) if __name__ == '__main__': events", "- x[0], x[0], x[1])) reserved = set() for start, end in events: if", "[2, 3], [3, 4]], [[1, 2], [2, 3], [3, 4], [1, 2]], [[1,", "reserved: reserved.add(start) else: for day in range(start, end+1): if day not in reserved:", "[3, 4], [1, 1]], [[1, 100000]], [[1, 1], [1, 2], [1, 3], [1,", "1]], [[1, 100000]], [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5],", "list[list[int, int]]) -> int: events = sorted(events, key=lambda x: (x[1] - x[0], x[0],", "set() for start, end in events: if start not in reserved: reserved.add(start) else:", "[[1, 100000]], [[1, 1], [1, 2], [1, 3], [1, 4], 
[1, 5], [1,", "1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7]],", "reserved = set() for start, end in events: if start not in reserved:", "key=lambda x: (x[1] - x[0], x[0], x[1])) reserved = set() for start, end", "x[0], x[1])) reserved = set() for start, end in events: if start not", "<reponame>Sma-Das/Leetcode def maxEvents(events: list[list[int, int]]) -> int: events = sorted(events, key=lambda x: (x[1]", "2]], [[1, 4], [4, 4], [2, 2], [3, 4], [1, 1]], [[1, 100000]],", "int]]) -> int: events = sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1]))", "not in reserved: reserved.add(start) else: for day in range(start, end+1): if day not", "else: for day in range(start, end+1): if day not in reserved: reserved.add(day) break", "== '__main__': events = [ [[1, 2], [2, 3], [3, 4]], [[1, 2],", "start, end in events: if start not in reserved: reserved.add(start) else: for day", "if start not in reserved: reserved.add(start) else: for day in range(start, end+1): if", "[[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1,", "for start, end in events: if start not in reserved: reserved.add(start) else: for", "[[1, 4], [4, 4], [2, 2], [3, 4], [1, 1]], [[1, 100000]], [[1,", "4]], [[1, 2], [2, 3], [3, 4], [1, 2]], [[1, 4], [4, 4],", "2], [3, 4], [1, 1]], [[1, 100000]], [[1, 1], [1, 2], [1, 3],", "[1, 2]], [[1, 4], [4, 4], [2, 2], [3, 4], [1, 1]], [[1,", "day in range(start, end+1): if day not in reserved: reserved.add(day) break return len(reserved)", "sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1])) reserved = set() for start,", "reserved.add(day) break return len(reserved) if __name__ == '__main__': events = [ [[1, 2],", "reserved.add(start) else: for day in range(start, end+1): if day not in reserved: reserved.add(day)", "[1, 1]], [[1, 100000]], [[1, 1], [1, 2], [1, 3], [1, 4], [1,", "not in reserved: reserved.add(day) break return len(reserved) if __name__ == '__main__': events =", "events: if start not in reserved: reserved.add(start) else: for day in 
range(start, end+1):", "[3, 4]], [[1, 2], [2, 3], [3, 4], [1, 2]], [[1, 4], [4,", "return len(reserved) if __name__ == '__main__': events = [ [[1, 2], [2, 3],", "[[1, 2], [2, 3], [3, 4], [1, 2]], [[1, 4], [4, 4], [2,", "2], [2, 3], [3, 4], [1, 2]], [[1, 4], [4, 4], [2, 2],", "= sorted(events, key=lambda x: (x[1] - x[0], x[0], x[1])) reserved = set() for", "x[1])) reserved = set() for start, end in events: if start not in", "end+1): if day not in reserved: reserved.add(day) break return len(reserved) if __name__ ==", "[2, 3], [3, 4], [1, 2]], [[1, 4], [4, 4], [2, 2], [3," ]
[ "app with app.test_client() as c: response= c.get('/') assert response.data == b'Hello World!' assert", "from app import app with app.test_client() as c: response= c.get('/') assert response.data ==", "app import app with app.test_client() as c: response= c.get('/') assert response.data == b'Hello", "import app with app.test_client() as c: response= c.get('/') assert response.data == b'Hello World!'", "with app.test_client() as c: response= c.get('/') assert response.data == b'Hello World!' assert response.status_code==200" ]
[ "\"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \", d[\"foo\"])", "defaultdict def default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d)", "defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \", d[\"foo\"]) print(\"bar => \", d[\"bar\"])", "# -*- coding: utf-8 -*- from collections import defaultdict def default_factory(): return \"default", "def default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo", "value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \", d[\"foo\"]) print(\"bar", "from collections import defaultdict def default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\")", "collections import defaultdict def default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d:", "= defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \", d[\"foo\"]) print(\"bar => \",", "default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo =>", "-*- coding: utf-8 -*- from collections import defaultdict def default_factory(): return \"default value\"", "-*- from collections import defaultdict def default_factory(): return \"default value\" d = defaultdict(default_factory,", "return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \",", "d = defaultdict(default_factory, foo=\"bar\") print(\"d: \", d) print(\"foo => \", d[\"foo\"]) print(\"bar =>", "utf-8 -*- from collections import defaultdict def default_factory(): return \"default value\" d =", "<gh_stars>1-10 # -*- coding: utf-8 -*- from collections import defaultdict def default_factory(): return", "coding: utf-8 -*- from collections import defaultdict def default_factory(): return \"default value\" 
d", "import defaultdict def default_factory(): return \"default value\" d = defaultdict(default_factory, foo=\"bar\") print(\"d: \"," ]
[ "i+1 == self.sz: # last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i", "= None def __iter__(self): start = datetime.datetime.now() last_len = 0 s='' for (i,x)", "print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e: print e", "for pathname in progress: s = cache.get(pathname) if cache else None if s", "json import unidecode import argparse class Storage: def __init__(self, pathname): self.year = None", "self.dict()[key] def item(self,key): if key=='y': return self.year elif key=='m': return self.month elif key=='c':", "def main(): try: # create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number", "None s = Storage(None) (s.year, s.month, s.country, s.state, s.city) = row s.cached =", "elif key=='m': return self.month elif key=='c': return self.country elif key=='s': return self.state elif", "__init__(self, pathname): self.year = None self.month = None self.country = None self.state =", "this_len = len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len", "self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class", "?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert", "if fd.isatty(): self.fd = fd else: self.fd = None except: self.fd = None", "%s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return", "self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc", "\"found \"+str(row) if row is None: return None s = Storage(None) (s.year, s.month,", "unidecode import argparse class Storage: def __init__(self, pathname): self.year = None self.month =", "self.pending.iteritems(): sqlext.append(\"(?, ?, 
?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state)", "service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory", "if s is None: s = Storage(pathname) try: im = Image.open(pathname) exif =", "fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back", "subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before", "TABLE IF NOT EXISTS storage ( hash int PRIMARY KEY, year text, month", "cached += sub_cached non_cached += sub_non_cached return (cached, non_cached) def size(self): return len(self.value)", "cached = len(filter(lambda (v,c): c, self.value)) non_cached = len(self.value) - cached for k,n", "\"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s if cache:", "KEY, year text, month text, country text, state text, city text, filename text", "e: print e import traceback tb = traceback.format_exc() print tb return -1 if", "back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len))", "*= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor =", "elif i > 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) /", "the order args.order=args.order.lower() oc = [0]*128 for ch in args.order: oc[ord(ch)] += 1", "return s else: return 
self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql = \"\"\"", "flush(self): if len(self.pending) > 0: cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash,", "month, country, state, city FROM storage WHERE hash = ? ; \"\"\" cursor.execute(sql,(h,))", "self.children[k].merge(v) else: self.children[k] = v self.value += n.value def flatten(self): new_node = Node()", "state = unidecode.unidecode(address['state']) except: state = None try: country = unidecode.unidecode(address['country']) except: country", "class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address =", "def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year, month,", "return s is not None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def", "[] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\") results.append(k)", "shutil from PIL import Image import datetime import time import PIL.ExifTags import sqlite3", "= unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min,", "self.fd = fd else: self.fd = None except: self.fd = None def __iter__(self):", "'' for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return", "cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd): self.lst = lst self.sz", "s is not None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname):", "structure\") args = parser.parse_args() # check the order args.order=args.order.lower() oc = [0]*128 for", 
"env(key,default): try: val = os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int):", "text, country text, state text, city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit()", "in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k] = v self.value +=", "return self.year elif key=='m': return self.month elif key=='c': return self.country elif key=='s': return", "= new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in", "SELECT * FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def flush(self):", "self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None)", "storage\" self.pending = {} self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql", "non_cached = 0 if self.value: cached = len(filter(lambda (v,c): c, self.value)) non_cached =", "len(self.children)==0 def add(self,k): if k is None: return self if not k in", "country, state, city, filename) VALUES (?, ?, ?, ?, ?, ?, ?); \"\"\"", "= None if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt)", "\"\"\" CREATE TABLE IF NOT EXISTS storage ( hash int PRIMARY KEY, year", "rows def flush(self): if len(self.pending) > 0: cursor = self.conn.cursor() sql = \"INSERT", "exif = ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year s.month = exif.month", "IF NOT EXISTS storage ( hash int PRIMARY KEY, year text, month text,", "return self.dict()[key] def item(self,key): if key=='y': return self.year elif key=='m': return self.month elif", "s.city) = row s.cached = True return s def __contains__(self,h): s = self[h]", "if k != dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) 
except Exception", "items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory", "self.sz = len(self.lst) try: if fd.isatty(): self.fd = fd else: self.fd = None", "self.use_pending: self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor =", "= requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city']) except:", "self.city = None if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year =", "for k,v in stores.iteritems(): node = self.head for m in mode: node =", "progress: s = cache.get(pathname) if cache else None if s is None: s", "key=='c': return self.country elif key=='s': return self.state elif key=='l': return self.city else: return", "and exif.month: s.year = exif.year s.month = exif.month if exif.lat and exif.lon: if", "datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else:", "'true' elif isinstance(default,int): return int(val) else: return val except: return default def main():", "Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0 return dd class", "be permutation of 'YMCSL'. 
Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key.", "= ' ' * (Node.prefixsz * level) if self.value: for v, cached in", "True return s def __contains__(self,h): s = self[h] return s is not None", "except: city = None try: state = unidecode.unidecode(address['state']) except: state = None try:", "elif key=='s': return self.state elif key=='l': return self.city else: return None class Node:", "{} self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT", "elif isinstance(default,int): return int(val) else: return val except: return default def main(): try:", "month, country, state, city,filename) VALUES \" results = [] sqlext = [] for", "storage ( hash int PRIMARY KEY, year text, month text, country text, state", "results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending", "def __init__(self): self.children={} self.value = [] def isLeaf(self): return len(self.children)==0 def add(self,k): if", "results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {}", "pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt)", "= os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql", "e: print str(e) def env(key,default): try: val = os.environ[key] if isinstance(default,bool): return val.lower()", "os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for", "cursor.fetchone() #print \"found \"+str(row) if row is None: return None s = Storage(None)", "for k,n 
in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached non_cached +=", "for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn", "default def main(): try: # create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum", "= Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value", "def item(self,key): if key=='y': return self.year elif key=='m': return self.month elif key=='c': return", "cached,non_cached = n.count_cached() if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def", "'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems():", "in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order. 
Must be permutation of", "n.value def flatten(self): new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children =", "(previous) elements in directory structure\") args = parser.parse_args() # check the order args.order=args.order.lower()", "if not dry_run: try: shutil.move(k,dst) except Exception as e: print str(e) def env(key,default):", "def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator)", "import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with reverse geocoding gmaps =", "for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse):", "c, self.value)) non_cached = len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached)", "print rows def flush(self): if len(self.pending) > 0: cursor = self.conn.cursor() sql =", "d in address: if key in d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder):", "exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon =", "elif key=='c': return self.country elif key=='s': return self.state elif key=='l': return self.city else:", "INTO storage(hash, year, month, country, state, city, filename) VALUES (?, ?, ?, ?,", "= len(self.lst) try: if fd.isatty(): self.fd = fd else: self.fd = None except:", "\"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size()", "if chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order. 
Must be", "row = cursor.fetchone() #print \"found \"+str(row) if row is None: return None s", "shutil.move(k,dst) except Exception as e: print str(e) def env(key,default): try: val = os.environ[key]", "%s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size() < minimum) and (current >=", "self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse)", "= raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds", "def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return", "%(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return cls(stores,mode)", "= None self.month = None self.country = None self.state = None self.city =", "self.month = None self.country = None self.state = None self.city = None if", "cache else None if s is None: s = Storage(pathname) try: im =", "len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems():", "show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached = 0 if", "and (n.size() < minimum) and (current >= level): if show_collapse: print \"merging %s", "datetime.datetime.now() last_len = 0 s='' for (i,x) in enumerate(self.lst): yield x elapsed =", "raise RuntimeError(\"Invalid argument for --order. 
Must be permutation of 'YMCSL'\") # move the", "s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import", "rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\") args = parser.parse_args() # check", "files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e:", "photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\") args =", "new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items():", "def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ')", "self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print", "oc = [0]*128 for ch in args.order: oc[ord(ch)] += 1 for (i,cc) in", "\"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h)", "if neg: dd *= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn =", "try: shutil.move(k,dst) except Exception as e: print str(e) def env(key,default): try: val =", "self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd): self.lst = lst self.sz =", "s.year = exif.year s.month = exif.month if exif.lat and exif.lon: if google is", "(?, ?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country,", "M=Month; C=Country; S=State; L=Locality/City\") 
parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key to use", "state = None try: country = unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country)", "country text, state text, city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print", "/ float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd", "sub_cached non_cached += sub_non_cached return (cached, non_cached) def size(self): return len(self.value) + len(self.children)", "directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e)", "storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least {}", "os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return int(val) else: return", "def merge(self,n): for k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k]", "parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\") args = parser.parse_args() # check the", "= parser.parse_args() # check the order args.order=args.order.lower() oc = [0]*128 for ch in", "= os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return int(val) else:", "args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if cc > (1 if", "not cached or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached()", "#print \"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e:", "filename text ); \"\"\" cursor.execute(sql) 
self.conn.commit() #print \"created table storage\" self.pending = {}", "(h, s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s", "if exif.lat and exif.lon: if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country)", "= (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0 if neg:", "' * (Node.prefixsz * level) if self.value: for v, cached in self.value: if", "Node() for k,v in stores.iteritems(): node = self.head for m in mode: node", "= None self.city = None if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix)", "elapsed = datetime.datetime.now() - start if i+1 == self.sz: # last one predict_display", "\"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def", "= Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year s.month", "= self[h] return s is not None def get(self,pathname): h = self.make_hash(pathname) return", "files in os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress =", "self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None", "__contains__(self,h): s = self[h] return s is not None def get(self,pathname): h =", "= \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz *", "__init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE IF", "address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = 
GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country)", "\"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn =", "reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def", "def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try:", "Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value def", "in self.value: if not cached or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems():", "levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d =", "cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month", "ch in args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if cc >", "self if not k in self.children: self.children[k] = Node() return self.children[k] def merge(self,n):", "0 non_cached = 0 if self.value: cached = len(filter(lambda (v,c): c, self.value)) non_cached", "try: if fd.isatty(): self.fd = fd else: self.fd = None except: self.fd =", "VALUES (?, ?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month,", "rtrn[v] = os.path.basename(v) if full_path else '' for k,n in self.children.iteritems(): for dk,dv", "%s at %d with 
%s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size() <", "city, filename) VALUES (?, ?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h,", "= False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return", "def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT year, month, country, state,", "super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try: city =", "geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in", "= None if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon", "s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT", "files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress: s", "cached in self.value: rtrn[v] = os.path.basename(v) if full_path else '' for k,n in", "__iter__(self): start = datetime.datetime.now() last_len = 0 s='' for (i,x) in enumerate(self.lst): yield", "import argparse class Storage: def __init__(self, pathname): self.year = None self.month = None", "\"\"\" SELECT year, month, country, state, city FROM storage WHERE hash = ?", "move the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception", "self.year = None self.month = None if 0x8825 in exifraw: gpsraw = exifraw[0x8825]", "node = self.head for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def 
dict(self,full_path):", "?, ?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f)", "self.pending = {} self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql =", "argparse class Storage: def __init__(self, pathname): self.year = None self.month = None self.country", "= googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state =", "if len(self.pending) > 0: cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash, year,", "if key in d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon)", "if exif.year and exif.month: s.year = exif.year s.month = exif.month if exif.lat and", "city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key):", "except: country = None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run,", "return val except: return default def main(): try: # create the args list", "self.value: cached = len(filter(lambda (v,c): c, self.value)) non_cached = len(self.value) - cached for", "0): raise RuntimeError(\"Invalid argument for --order. 
Must be permutation of 'YMCSL'\") # move", "= n.count_cached() if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self):", "= float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \" (%.2f", "key=='l': return self.city else: return None class Node: prefixsz = 8 def __init__(self):", "cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year =", "((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator))", "yield x elapsed = datetime.datetime.now() - start if i+1 == self.sz: # last", "list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in", "return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return self.year elif", "= \"\"\" CREATE TABLE IF NOT EXISTS storage ( hash int PRIMARY KEY,", "@staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees =", "to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\")", "'YMCSL'\") # move the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0", "%s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i)", "in self.value: rtrn[v] = os.path.basename(v) if full_path else '' for k,n in self.children.iteritems():", "if self.value: cached = len(filter(lambda (v,c): c, 
self.value)) non_cached = len(self.value) - cached", "exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat =", "self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached non_cached += sub_non_cached return (cached,", "if k in self.children: self.children[k].merge(v) else: self.children[k] = v self.value += n.value def", "exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None self.month =", "except Exception as e: #print str(e) pass dst = os.path.join(root,v) if k !=", "storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google)", "sql = \"\"\" CREATE TABLE IF NOT EXISTS storage ( hash int PRIMARY", "?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql +=", "= GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address:", "last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else:", "if key=='y': return self.year elif key=='m': return self.month elif key=='c': return self.country elif", "self.sz: # last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i > 2:", "return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = 
requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text)", "= (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) /", "self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)):", "= {} self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\"", "__init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look", "def env(key,default): try: val = os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif", "unidecode.unidecode(address['state']) except: state = None try: country = unidecode.unidecode(address['country']) except: country = None", "self.value: if not cached or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached", "v, cached in self.value: if not cached or show_cached: print prefix+os.path.basename(v) for k,n", "results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit()", "try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass dst = os.path.join(root,v) if", "cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year", "return self.month elif key=='c': return self.country elif key=='s': return self.state elif key=='l': return", "s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s): h =", "raw Degrees = (float(degreesNumerator) / 
float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds =", "(v,c): c, self.value)) non_cached = len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached,", "in self.children: self.children[k] = Node() return self.children[k] def merge(self,n): for k,v in n.children.iteritems():", "self.fd = None def __iter__(self): start = datetime.datetime.now() last_len = 0 s='' for", "(current >= level-1): if show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten()", "Storage: def __init__(self, pathname): self.year = None self.month = None self.country = None", "2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display =", "for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\") results.append(k) results.append(v.year)", "else: return None class Node: prefixsz = 8 def __init__(self): self.children={} self.value =", "print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except Exception as e: print str(e)", "__str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return self.year", "= Node() return self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if k in", "non_cached = len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached()", "self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {}", "except: state = None try: country = unidecode.unidecode(address['country']) except: country = None self.loc=", "* level) if self.value: for v, cached in self.value: if not cached or", "k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = 
n.count_cached() cached += sub_cached non_cached += sub_non_cached", "class Storage: def __init__(self, pathname): self.year = None self.month = None self.country =", "class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\"", "year text, month text, country text, state text, city text, filename text );", "\"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz * level)", "self.children: self.children[k] = Node() return self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if", "None: return None s = Storage(None) (s.year, s.month, s.country, s.state, s.city) = row", "self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ') lfileext =", "< minimum) and (n.size() < minimum) and (current >= level): if show_collapse: print", "cached = 0 non_cached = 0 if self.value: cached = len(filter(lambda (v,c): c,", "= fd else: self.fd = None except: self.fd = None def __iter__(self): start", "0 except Exception as e: print e import traceback tb = traceback.format_exc() print", "cursor.execute(sql) rows = cursor.fetchall() print rows def flush(self): if len(self.pending) > 0: cursor", "dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key):", "{} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making", "s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s def", "def __iter__(self): start = 
datetime.datetime.now() last_len = 0 s='' for (i,x) in enumerate(self.lst):", "__init__(self): self.children={} self.value = [] def isLeaf(self): return len(self.children)==0 def add(self,k): if k", "?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state,", "= exifraw[0x0132][5:7] else: self.year = None self.month = None if 0x8825 in exifraw:", "sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?,", "= 8 def __init__(self): self.children={} self.value = [] def isLeaf(self): return len(self.children)==0 def", "\"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if row is None: return", "if show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def", "json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city']) except: city = None try:", "structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be permutation of 'YMCSL'. Y=Year; M=Month;", "return self.country elif key=='s': return self.state elif key=='l': return self.city else: return None", "self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else:", "Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0 if", "= nom['address'] try: city = unidecode.unidecode(address['city']) except: city = None try: state =", "of 'YMCSL'. Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this", "structure. Must be permutation of 'YMCSL'. 
Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps", "self.pending = {} class Progress: def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst)", "Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key to", "as e: print str(e) def env(key,default): try: val = os.environ[key] if isinstance(default,bool): return", "self.lst = lst self.sz = len(self.lst) try: if fd.isatty(): self.fd = fd else:", "s is None: s = Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif())", "last_len = 0 s='' for (i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now()", "License: MIT import sys import re import os import shutil from PIL import", "?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql +=", "directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be permutation of 'YMCSL'. 
Y=Year;", "for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not", "e: #print str(e) pass dst = os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v)", "{} for v, cached in self.value: rtrn[v] = os.path.basename(v) if full_path else ''", "Progress(all_files,fd) for pathname in progress: s = cache.get(pathname) if cache else None if", "#print \"insert \"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname)", "= self.conn.cursor() sql = \"\"\" SELECT year, month, country, state, city FROM storage", "cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT EXISTS storage (", "in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run: try:", "to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\") args = parser.parse_args()", "self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix", "self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) =", "Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous)", "self.value: rtrn[v] = os.path.basename(v) if full_path else '' for k,n in self.children.iteritems(): for", "self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT", "k in self.children: self.children[k] = Node() return self.children[k] def merge(self,n): for k,v 
in", "= s if cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132", "> 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached", "val = os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return int(val)", "1 for (i,cc) in enumerate(oc): if cc > (1 if chr(i) in StorageTree.default_mode", "self.value += n.value def flatten(self): new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v)", "= lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files in os.walk(root)", "None self.city = None if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year", "= self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT EXISTS storage ( hash", "?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit()", "parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\")", "dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached)", "cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending = {} self.use_pending = use_pending def", "0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None", "storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print 
\"Making directory %s\"%d", "is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except", "__init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try: city", "print e import traceback tb = traceback.format_exc() print tb return -1 if __name__=='__main__':", "= [] sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?,", "# Look up an address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result =", "= {} if fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files =", "country = unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def move_files(args): (root, storage_levels,", "least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print", "(secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) /", "if self.use_pending: self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor", "permutation of 'YMCSL'\") # move the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump()", "{'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y':", "= \"INSERT INTO storage(hash, year, month, country, state, 
city,filename) VALUES \" results =", "('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress: s = cache.get(pathname) if cache", "\" results = [] sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?,", "else: return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT *", "self.month = None if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S')", "level): if show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k]", "year, month, country, state, city FROM storage WHERE hash = ? ; \"\"\"", "Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0 return dd class Cache: def", "< minimum) and (current >= level): if show_collapse: print \"merging %s at %d", "print s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key):", "number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\")", "up an address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address", "EXISTS storage ( hash int PRIMARY KEY, year text, month text, country text,", "table storage\" self.pending = {} self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor()", "main(): try: # create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of", "results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\"", "= ? 
; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if row", "key=='s': return self.state elif key=='l': return self.city else: return None class Node: prefixsz", "merge(self,n): for k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k] =", "n.flatten() if (num_children < minimum) and (n.size() < minimum) and (current >= level):", "rows = cursor.fetchall() print rows def flush(self): if len(self.pending) > 0: cursor =", "for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def", "fd else: self.fd = None except: self.fd = None def __iter__(self): start =", "collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be permutation of 'YMCSL'. Y=Year; M=Month; C=Country; S=State;", "(%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd:", "use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT year, month, country,", "= (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an", "%s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {} for v, cached in", "parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving", "(cached, non_cached) def size(self): return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode):", "enumerate(oc): if cc > (1 if chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid", 
"minimum) and (n.size() < minimum) and (current >= level): if show_collapse: print \"merging", "= sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT EXISTS", "k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >= level-1): if", ">= level): if show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del", "= StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min)", "+= n.value def flatten(self): new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children", "') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files", "= ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None @staticmethod", "in enumerate(oc): if cc > (1 if chr(i) in StorageTree.default_mode else 0): raise", "files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. 
Must be permutation of", "(float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd", "nom['address'] try: city = unidecode.unidecode(address['city']) except: city = None try: state = unidecode.unidecode(address['state'])", "collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default", "count_cached(self): cached = 0 non_cached = 0 if self.value: cached = len(filter(lambda (v,c):", "predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \" (%.2f fps, %s", "get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename =", "fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower()", "(s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as", "else: self.year = None self.month = None if 0x8825 in exifraw: gpsraw =", "(n.size() < minimum) and (current >= level-1): if show_collapse: print \"flattening %s at", "dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd:", "stores = {} if fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files", "n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >= level-1): if 
show_collapse: print \"flattening", "sql = \"\"\" SELECT year, month, country, state, city FROM storage WHERE hash", "s.cached = True return s def __contains__(self,h): s = self[h] return s is", "cursor = self.conn.cursor() sql = \"\"\" SELECT year, month, country, state, city FROM", "def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems(): node = self.head for", "return 0 except Exception as e: print e import traceback tb = traceback.format_exc()", "os import shutil from PIL import Image import datetime import time import PIL.ExifTags", "text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending = {} self.use_pending", "'' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s)", "= datetime.datetime.now() last_len = 0 s='' for (i,x) in enumerate(self.lst): yield x elapsed", "if not cached or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached =", "class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with", "None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname)", "directory structure. Must be permutation of 'YMCSL'. 
Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google", "(num_children < minimum) and (n.size() < minimum) and (current >= level): if show_collapse:", "the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of", "= len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current", "rtrn = {} for v, cached in self.value: rtrn[v] = os.path.basename(v) if full_path", "cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if row is None: return None", "len(filter(lambda (v,c): c, self.value)) non_cached = len(self.value) - cached for k,n in self.children.iteritems():", "city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending", "= os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s)) def", "= Node() for k,v in stores.iteritems(): node = self.head for m in mode:", "in %s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds =", "= datetime.datetime.now() - start if i+1 == self.sz: # last one predict_display =", "!= dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except Exception as e:", "number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory", "import time import PIL.ExifTags import sqlite3 import requests import json import unidecode import", 
"prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0) or", "__init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with reverse geocoding gmaps", "= GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e: #print \"Exception %s: %s\"", "s if cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in", "float(self.sz-i) / rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display =", "def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename", "for filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname", "= None try: state = unidecode.unidecode(address['state']) except: state = None try: country =", "results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class", "0 if self.value: cached = len(filter(lambda (v,c): c, self.value)) non_cached = len(self.value) -", "None self.month = None self.country = None self.state = None self.city = None", "GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e: #print", "?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql", "def __init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else:", "try: state = unidecode.unidecode(address['state']) except: state = None try: country = unidecode.unidecode(address['country']) except:", "\"\"\" INSERT INTO 
storage(hash, year, month, country, state, city, filename) VALUES (?, ?,", "Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator)", "without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be", "# (c)2018 <NAME> # License: MIT import sys import re import os import", "lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress: s = cache.get(pathname)", "s.state, s.city) = row s.cached = True return s def __contains__(self,h): s =", "Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached", "print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached = 0 if self.value:", "cursor.fetchall() print rows def flush(self): if len(self.pending) > 0: cursor = self.conn.cursor() sql", "Progress: def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try: if fd.isatty(): self.fd", "= None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator,", "def flatten(self): new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children", "= Storage(None) (s.year, s.month, s.country, s.state, s.city) = row s.cached = True return", "os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql =", "super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an 
address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result", "s.month, s.country, s.state, s.city) = row s.cached = True return s def __contains__(self,h):", "# last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i > 2: rate", "start if i+1 == self.sz: # last one predict_display = \" in %s\"%(str(elapsed)[:7])", "# check the order args.order=args.order.lower() oc = [0]*128 for ch in args.order: oc[ord(ch)]", "of 'YMCSL'\") # move the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return", "stores[pathname] = s if cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if", "not k in self.children: self.children[k] = Node() return self.children[k] def merge(self,n): for k,v", "new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value +=", "from PIL import Image import datetime import time import PIL.ExifTags import sqlite3 import", "= os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception", "exifraw[0x0132][5:7] else: self.year = None self.month = None if 0x8825 in exifraw: gpsraw", "unidecode.unidecode(address['city']) except: city = None try: state = unidecode.unidecode(address['state']) except: state = None", "self.conn.commit() #print \"created table storage\" self.pending = {} self.use_pending = use_pending def __getitem__(self,h):", "gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None", "import unidecode import argparse class Storage: def __init__(self, pathname): self.year = None self.month", "month text, country text, state text, city text, filename text ); \"\"\" cursor.execute(sql)", "be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show 
cached (previous) elements in directory structure\") args = parser.parse_args() #", "of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure", "'YMCSL'. Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key", "rtrn def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz * level) if self.value:", "rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s", "the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as", "\"insert \"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if", "flatten(self): new_node = Node() for k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value", "s = cache.get(pathname) if cache else None if s is None: s =", "sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT EXISTS storage", "in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress: s = cache.get(pathname) if", "s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s): h", "if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass dst", "show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path):", "try: im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month: s.year =", "self.children = new_node.children self.value += new_node.value def 
collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n", "Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees", "results = [] sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?,", "self.city else: return None class Node: prefixsz = 8 def __init__(self): self.children={} self.value", "dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = '", "== 'true' elif isinstance(default,int): return int(val) else: return val except: return default def", "None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees", "\" in %s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds", "move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e: print", "len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached +=", "class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems(): node", "moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. 
Must be permutation", "Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE", "isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return int(val) else: return val except:", "d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except", "return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash,", "for d, _, files in os.walk(root) for filename in files if lfileext(filename) in", "exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None self.month = None if 0x8825", "for v, cached in self.value: rtrn[v] = os.path.basename(v) if full_path else '' for", "str(e) def env(key,default): try: val = os.environ[key] if isinstance(default,bool): return val.lower() == 'true'", "dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except Exception as e: print", "collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum)", "[] sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?,", "FROM storage WHERE hash = ? 
; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print", "self.conn.cursor() sql = \"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall()", "None if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month", "= os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst)", "print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in", "show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum)", "def address_part(address,key): for d in address: if key in d['types']: return d['long_name'] return", "storage(hash, year, month, country, state, city,filename) VALUES \" results = [] sqlext =", "filename) VALUES (?, ?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year,", "None self.state = None self.city = None if pathname: unix = os.path.getmtime(pathname) dt", "s def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] =", "+ len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in", "storage_tree.dump(show_cached) print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v", "self.children={} self.value = [] def isLeaf(self): return len(self.children)==0 def add(self,k): if k is", "(i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start if i+1 ==", "+= sub_non_cached return (cached, non_cached) def size(self): return len(self.value) + len(self.children) class StorageTree:", "= \"\"\" INSERT INTO storage(hash, year, month, country, 
state, city, filename) VALUES (?,", "with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city", "float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0", "<NAME> # License: MIT import sys import re import os import shutil from", "default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems(): node = self.head", "= [os.path.join(d,filename) for d, _, files in os.walk(root) for filename in files if", "in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached)", "None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached)", "this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n')", "try: city = unidecode.unidecode(address['city']) except: city = None try: state = unidecode.unidecode(address['state']) except:", "C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. 
Specify this key to use Google", "\"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending = {} self.use_pending = use_pending", "for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >= level-1):", "(Node.prefixsz * level) if self.value: for v, cached in self.value: if not cached", "pass if cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return cls(stores,mode) class", "= n.count_cached() cached += sub_cached non_cached += sub_non_cached return (cached, non_cached) def size(self):", "str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return self.year elif key=='m':", "if not k in self.children: self.children[k] = Node() return self.children[k] def merge(self,n): for", "if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W')", "\"+str(row) if row is None: return None s = Storage(None) (s.year, s.month, s.country,", "gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country')", "if i+1 == self.sz: # last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif", "# move the files move_files(args) print \"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except", "cached or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if", "(root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree =", "sqlite3 import requests import json import 
unidecode import argparse class Storage: def __init__(self,", "def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s)", "chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order. Must be permutation", "self.value: for v, cached in self.value: if not cached or show_cached: print prefix+os.path.basename(v)", "return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom", "except Exception as e: print e import traceback tb = traceback.format_exc() print tb", "k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def", "return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql", "in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country)", "#print \"created table storage\" self.pending = {} self.use_pending = use_pending def __getitem__(self,h): cursor", "return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def", "storage WHERE hash = ? 
; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found", "last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object):", "googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with reverse geocoding gmaps = googlemaps.Client(key=key)", "month, country, state, city, filename) VALUES (?, ?, ?, ?, ?, ?, ?);", "(i,cc) in enumerate(oc): if cc > (1 if chr(i) in StorageTree.default_mode else 0):", "country, state, city,filename) VALUES \" results = [] sqlext = [] for (k,(f,v))", "self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children)", "if (n.size() < minimum) and (current >= level-1): if show_collapse: print \"flattening %s", "< last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush()", "cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw):", "if fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for", "None self.country = None self.state = None self.city = None if pathname: unix", "%d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {} for v,", "parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. 
Must be permutation of 'YMCSL'.", "import datetime import time import PIL.ExifTags import sqlite3 import requests import json import", "def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) #", "for ch in args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if cc", "-1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor()", "= \"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict())", "= unidecode.unidecode(address['state']) except: state = None try: country = unidecode.unidecode(address['country']) except: country =", "else: return val except: return default def main(): try: # create the args", "google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing", "in args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if cc > (1", "an address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address =", "isLeaf(self): return len(self.children)==0 def add(self,k): if k is None: return self if not", "s='' for (i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start if", "d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom =", "> (1 if 
chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order.", "= float(self.sz-i) / rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display", "r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city'])", "return len(self.children)==0 def add(self,k): if k is None: return self if not k", "self.fd = None except: self.fd = None def __iter__(self): start = datetime.datetime.now() last_len", "_, files in os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress", "#print \"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s if", "args = parser.parse_args() # check the order args.order=args.order.lower() oc = [0]*128 for ch", "print str(e) def env(key,default): try: val = os.environ[key] if isinstance(default,bool): return val.lower() ==", "CREATE TABLE IF NOT EXISTS storage ( hash int PRIMARY KEY, year text,", "= this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def", "unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached", "import sys import re import os import shutil from PIL import Image import", "FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def flush(self): if len(self.pending)", "in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children =", "os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except", "import json import unidecode import argparse class Storage: def __init__(self, pathname): 
self.year =", "print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and", "else: self.lat = None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator,", "GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address']", "str(e) pass dst = os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if not", "return self if not k in self.children: self.children[k] = Node() return self.children[k] def", "level) if self.value: for v, cached in self.value: if not cached or show_cached:", "self.head = Node() for k,v in stores.iteritems(): node = self.head for m in", "= 0 if self.value: cached = len(filter(lambda (v,c): c, self.value)) non_cached = len(self.value)", "k in self.children: self.children[k].merge(v) else: self.children[k] = v self.value += n.value def flatten(self):", "filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in", "(float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator))", "__setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year, month, country,", "= None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google,", "+= sub_cached non_cached += sub_non_cached return (cached, non_cached) def size(self): return len(self.value) +", "cursor = self.conn.cursor() sql = \"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql) rows", "self.country elif key=='s': return self.state elif key=='l': return self.city else: return None 
class", "def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with reverse geocoding", "k is None: return self if not k in self.children: self.children[k] = Node()", "for v, cached in self.value: if not cached or show_cached: print prefix+os.path.basename(v) for", "\",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd):", "im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year", "self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key):", "self.month elif key=='c': return self.country elif key=='s': return self.state elif key=='l': return self.city", "= exif.year s.month = exif.month if exif.lat and exif.lon: if google is None:", "cached (previous) elements in directory structure\") args = parser.parse_args() # check the order", "- start if i+1 == self.sz: # last one predict_display = \" in", "parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory", "= exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None self.month = None if", "city = None try: state = unidecode.unidecode(address['state']) except: state = None try: country", "# License: MIT import sys import re import os import shutil from PIL", "None if s is None: s = Storage(pathname) try: im = Image.open(pathname) exif", "order args.order=args.order.lower() oc = [0]*128 for ch in args.order: oc[ord(ch)] += 1 for", "Specify this key to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory 
containing photos", "exif.month if exif.lat and exif.lon: if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else:", "else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e: #print \"Exception", "= Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month:", "return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores =", "if full_path else '' for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk]", "in address: if key in d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def", "storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def flush(self): if len(self.pending) >", "in enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start if i+1 == self.sz:", "cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return cls(stores,mode) class ExifData: def", "8 def __init__(self): self.children={} self.value = [] def isLeaf(self): return len(self.children)==0 def add(self,k):", "%s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back =", "size(self): return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node()", "num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and", "except: return default def main(): try: # create the args list parser =", "if show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() 
if (num_children <", "and (current >= level-1): if show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys())", "node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod", "show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for", "self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime))", "storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if", "try: country = unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def move_files(args): (root,", "level-1): if show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children", "for k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k] = v", "(minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes =", "__init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try: if fd.isatty(): self.fd = fd", "import re import os import shutil from PIL import Image import datetime import", "in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat", "= self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month, country, state, city,filename) 
VALUES", "requests import json import unidecode import argparse class Storage: def __init__(self, pathname): self.year", "fd.isatty(): self.fd = fd else: self.fd = None except: self.fd = None def", "self.conn.cursor() sql = \"\"\" SELECT year, month, country, state, city FROM storage WHERE", "state, city FROM storage WHERE hash = ? ; \"\"\" cursor.execute(sql,(h,)) row =", "with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {} for v, cached", "import PIL.ExifTags import sqlite3 import requests import json import unidecode import argparse class", "k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k] = v self.value", "0 s='' for (i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start", "mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor()", "k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0) or show_cached: print", "= {} class Progress: def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try:", "- cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached", "= self.head for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return", "not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass dst =", "# dropbox-photo-sorter # (c)2018 <NAME> # License: MIT import sys import re import", "s = self[h] return s is not None def get(self,pathname): h = self.make_hash(pathname)", "self.use_pending = use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT year,", "if cache: cache.flush() return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw:", "return (cached, non_cached) def size(self): 
return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def", "unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse,", "self.year elif key=='m': return self.month elif key=='c': return self.country elif key=='s': return self.state", "at %d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size() < minimum)", "if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd:", "= \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def", "reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements", "os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT INTO", "cache.get(pathname) if cache else None if s is None: s = Storage(pathname) try:", "# create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum", "if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s:", "@classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ') lfileext = lambda", "self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year, month, country, state, city, filename)", "if 
self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len < last_len:", "predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len", "mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print", "cc > (1 if chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for", "city = unidecode.unidecode(address['city']) except: city = None try: state = unidecode.unidecode(address['state']) except: state", "\" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if", "= os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT", "= None self.country = None self.state = None self.city = None if pathname:", "(float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0", "address_part(address,key): for d in address: if key in d['types']: return d['long_name'] return None", "Must be permutation of 'YMCSL'\") # move the files move_files(args) print \"This is", "def add(self,k): if k is None: return self if not k in self.children:", "args.order=args.order.lower() oc = [0]*128 for ch in args.order: oc[ord(ch)] += 1 for (i,cc)", "text, state text, city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created", "in directory structure\") args = parser.parse_args() # check the order args.order=args.order.lower() oc =", "in stores.iteritems(): node = self.head for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached))", "structure without moving 
files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must", "= True return s def __contains__(self,h): s = self[h] return s is not", "WHERE hash = ? ; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row)", "return None class Node: prefixsz = 8 def __init__(self): self.children={} self.value = []", "self.head for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path)", "sql = \"INSERT INTO storage(hash, year, month, country, state, city,filename) VALUES \" results", "sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city)", "time import PIL.ExifTags import sqlite3 import requests import json import unidecode import argparse", "\"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self):", "if row is None: return None s = Storage(None) (s.year, s.month, s.country, s.state,", "enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start if i+1 == self.sz: #", "key to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be", "self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1)", "__getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT year, month, country, state, city", "self.month = exifraw[0x0132][5:7] else: self.year = None self.month = None if 0x8825 in", "in self.children: self.children[k].merge(v) else: 
self.children[k] = v self.value += n.value def flatten(self): new_node", "requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city']) except: city", "* FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def flush(self): if", "self.children[k] = Node() return self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if k", "\"%s: %s\"%(filename,s) except Exception as e: #print \"Exception %s: %s\" %(filename,str(e)) pass if", "= GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e:", "d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon))", "n.children.iteritems(): if k in self.children: self.children[k].merge(v) else: self.children[k] = v self.value += n.value", "+= 1 for (i,cc) in enumerate(oc): if cc > (1 if chr(i) in", "dst = os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if not dry_run: try:", "if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached =", "reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city =", "MIT import sys import re import os import shutil from PIL import Image", "= len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached", "= Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0 return dd", "in self.children.iteritems(): 
(sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached non_cached += sub_non_cached return", "gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state", "dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz * level) if self.value: for v,", "= '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len =", "in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = ' '", "return cls(stores,mode) class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4]", "(1 if chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order. Must", "predict_display = \" in %s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds() predict", "self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month, country, state, city,filename) VALUES \"", "start = datetime.datetime.now() last_len = 0 s='' for (i,x) in enumerate(self.lst): yield x", "\";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd): self.lst = lst", "= gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country =", "#print \"%s: %s\"%(filename,s) except Exception as e: #print \"Exception %s: %s\" %(filename,str(e)) pass", "= ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator,", "fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d,", "in d['types']: return 
d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r =", "exif.lat and exif.lon: if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) =", "for d in address: if key in d['types']: return d['long_name'] return None class", "in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0):", "state text, city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table", "is None: return None s = Storage(None) (s.year, s.month, s.country, s.state, s.city) =", "\"created table storage\" self.pending = {} self.use_pending = use_pending def __getitem__(self,h): cursor =", "VALUES \" results = [] sqlext = [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?,", "None if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon =", "storage(hash, year, month, country, state, city, filename) VALUES (?, ?, ?, ?, ?,", "ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7]", "Look up an address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon))", "in progress: s = cache.get(pathname) if cache else None if s is None:", "self.children: self.children[k].merge(v) else: self.children[k] = v self.value += n.value def flatten(self): new_node =", "return self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if k in self.children: self.children[k].merge(v)", "dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass dst = os.path.join(root,v)", "= datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) 
self.cached = False def dict(self):", "class ExifData: def __init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month =", "def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached)", "(args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at", "if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return int(val) else: return val", "= None except: self.fd = None def __iter__(self): start = datetime.datetime.now() last_len =", "Image import datetime import time import PIL.ExifTags import sqlite3 import requests import json", "Exception as e: print e import traceback tb = traceback.format_exc() print tb return", "(k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month)", "= self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s else:", "self.lat = None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator),", "%s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {}", "self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT * FROM storage;", "float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0", "mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): 
return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def", "#print str(e) pass dst = os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if", "= use_pending def __getitem__(self,h): cursor = self.conn.cursor() sql = \"\"\" SELECT year, month,", "dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False def", "Exception as e: #print str(e) pass dst = os.path.join(root,v) if k != dst:", "S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key to use Google Maps", "ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator),", "self.fd.write(back+s) this_len = len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len =", "PRIMARY KEY, year text, month text, country text, state text, city text, filename", "(filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql =", "len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush() if", "k,v in self.children.iteritems(): new_node.merge(v) self.children = new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children", "self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None self.month = None", "try: # create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\")", "\"This is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e: print e import", "__getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return self.year elif key=='m': return self.month", 
"geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality')", "s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s):", "prefix = ' ' * (Node.prefixsz * level) if self.value: for v, cached", "prefixsz = 8 def __init__(self): self.children={} self.value = [] def isLeaf(self): return len(self.children)==0", "+= \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd): self.lst =", "new_node.merge(v) self.children = new_node.children self.value += new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for", "{} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d", "\"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending:", "self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {} for v, cached in self.value:", "s.month = exif.month if exif.lat and exif.lon: if google is None: (s.city,s.state,s.country) =", "Storage(None) (s.year, s.month, s.country, s.state, s.city) = row s.cached = True return s", "{} if fd: fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename)", "key=='m': return self.month elif key=='c': return self.country elif key=='s': return self.state elif key=='l':", "None def __iter__(self): start = datetime.datetime.now() last_len = 0 s='' for (i,x) in", "filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\"", "= \"\"\" SELECT * FROM 
storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows", "\"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def", "[] def isLeaf(self): return len(self.children)==0 def add(self,k): if k is None: return self", "= exif.month if exif.lat and exif.lon: if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc", "None try: country = unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def move_files(args):", "GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon)", "of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display", "os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False", "> 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display", "re import os import shutil from PIL import Image import datetime import time", "self.children[k] def dict(self,full_path): rtrn = {} for v, cached in self.value: rtrn[v] =", "for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0) or show_cached:", "GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address with reverse", "0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached =", "state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = 
GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in", "full_path else '' for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] =", "= row s.cached = True return s def __contains__(self,h): s = self[h] return", "cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached non_cached", "0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else:", "pass dst = os.path.join(root,v) if k != dst: print \"%s->%s\"%(k,v) if not dry_run:", "row is None: return None s = Storage(None) (s.year, s.month, s.country, s.state, s.city)", "parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be permutation of 'YMCSL'. Y=Year; M=Month; C=Country; S=State; L=Locality/City\")", "ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None @staticmethod def", "os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass dst = os.path.join(root,v) if k", "minimum) and (current >= level-1): if show_collapse: print \"flattening %s at %d with", "if self.value: for v, cached in self.value: if not cached or show_cached: print", "def dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT * FROM storage; \"\"\"", "= datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7])", "class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps", "= exifraw[0x8825] self.lat = ExifData.degrees(gpsraw[2],gpsraw[1]=='S') self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon", "exif.lon: if google is None: 
(s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print", "minimum) and (current >= level): if show_collapse: print \"merging %s at %d with", "return val.lower() == 'true' elif isinstance(default,int): return int(val) else: return val except: return", "os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self):", "dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql =", "show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {}", "== self.sz: # last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i >", "os.path.basename(v) if full_path else '' for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems():", "def __getitem__(self,key): return self.dict()[key] def item(self,key): if key=='y': return self.year elif key=='m': return", "googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1')", "dict(self,full_path): rtrn = {} for v, cached in self.value: rtrn[v] = os.path.basename(v) if", "make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor =", "int(val) else: return val except: return default def main(): try: # create the", "SELECT year, month, country, state, city FROM storage 
WHERE hash = ? ;", "args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least", "m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True):", "dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores", "< minimum) and (current >= level-1): if show_collapse: print \"flattening %s at %d", "i > 2: rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate)", "at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn = {} for", "else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class", "None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception", "country, state, city FROM storage WHERE hash = ? 
; \"\"\" cursor.execute(sql,(h,)) row", "= None self.state = None self.city = None if pathname: unix = os.path.getmtime(pathname)", "cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month, country, state, city,filename)", "= (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels", "/ float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees + Minutes/60.0 +", "GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address: if", "?, ?, ?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql", "else: self.children[k] = v self.value += n.value def flatten(self): new_node = Node() for", "for --order. 
Must be permutation of 'YMCSL'\") # move the files move_files(args) print", "is None: s = Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif()) if", "oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if cc > (1 if chr(i)", "key=='y': return self.year elif key=='m': return self.month elif key=='c': return self.country elif key=='s':", "s = Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and", "self.state elif key=='l': return self.city else: return None class Node: prefixsz = 8", "+ Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0 return dd class Cache:", "False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key]", "in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure", "GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address: if key in d['types']:", "predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s =", "one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds()", "= reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod", "{} class Progress: def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try: if", "if cache else None if s is None: s = Storage(pathname) try: im", "= 
GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address: if key in", "= None try: country = unidecode.unidecode(address['country']) except: country = None self.loc= (city,state,country) def", "n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached = 0 if self.value: cached =", "be permutation of 'YMCSL'\") # move the files move_files(args) print \"This is DropBoxPhotoSorter\"", "filename)) self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname) filename", "#cache_.dump() return 0 except Exception as e: print e import traceback tb =", "import os import shutil from PIL import Image import datetime import time import", "None: return self if not k in self.children: self.children[k] = Node() return self.children[k]", "%s\"%(filename,s) except Exception as e: #print \"Exception %s: %s\" %(filename,str(e)) pass if cache:", "--order. Must be permutation of 'YMCSL'\") # move the files move_files(args) print \"This", "is None: return self if not k in self.children: self.children[k] = Node() return", "Seconds/3600.0 if neg: dd *= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn", "google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s)", "StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument for --order. 
Must be permutation of 'YMCSL'\")", "(None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up an address", "text, month text, country text, state text, city text, filename text ); \"\"\"", "= cache.get(pathname) if cache else None if s is None: s = Storage(pathname)", "+= new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if", "self.month = \"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return", "+= \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def", "if cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush() return cls(stores,mode) class ExifData:", "def size(self): return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head =", "last one predict_display = \" in %s\"%(str(elapsed)[:7]) elif i > 2: rate =", "h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname)", "class Node: prefixsz = 8 def __init__(self): self.children={} self.value = [] def isLeaf(self):", "return default def main(): try: # create the args list parser = argparse.ArgumentParser()", "= cursor.fetchone() #print \"found \"+str(row) if row is None: return None s =", "pathname in progress: s = cache.get(pathname) if cache else None if s is", "= self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return", "?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, 
s.state, s.city, filename)) self.conn.commit() #print", "(s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e: #print \"Exception %s:", "for (i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now() - start if i+1", "node = node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse):", "None: s = Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year", "v self.value += n.value def flatten(self): new_node = Node() for k,v in self.children.iteritems():", "show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse:", "DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e: print e import traceback tb", "return self.state elif key=='l': return self.city else: return None class Node: prefixsz =", "StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse)", "= os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached =", "text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending =", "= cursor.fetchall() print rows def flush(self): if len(self.pending) > 0: cursor = self.conn.cursor()", "= json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city']) except: city = None", "self.state 
= None self.city = None if pathname: unix = os.path.getmtime(pathname) dt =", "self.children[k] = v self.value += n.value def flatten(self): new_node = Node() for k,v", "city,filename) VALUES \" results = [] sqlext = [] for (k,(f,v)) in self.pending.iteritems():", ">= level-1): if show_collapse: print \"flattening %s at %d with %s\"%(k,current,n.children.keys()) n.flatten() if", "dd *= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor", "def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() <", "results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending =", "dropbox-photo-sorter # (c)2018 <NAME> # License: MIT import sys import re import os", "(c)2018 <NAME> # License: MIT import sys import re import os import shutil", "= None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw", "Maps API Key. 
Specify this key to use Google Maps reverse geo-code service\")", "reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components'] city = GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country", "= unidecode.unidecode(address['city']) except: city = None try: state = unidecode.unidecode(address['state']) except: state =", "as e: #print str(e) pass dst = os.path.join(root,v) if k != dst: print", "def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ') lfileext = lambda f:", "\"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached = 0 if self.value: cached", "/ float(secondsDenominator)) dd = Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd *=", "k != dst: print \"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except Exception as", "pathname): self.year = None self.month = None self.country = None self.state = None", "return self.city else: return None class Node: prefixsz = 8 def __init__(self): self.children={}", "= GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for", "\"%s->%s\"%(k,v) if not dry_run: try: shutil.move(k,dst) except Exception as e: print str(e) def", "argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\")", "0: cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month, country, state,", "else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = 
chr(8)*last_len self.fd.write(back+s)", "rate = float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \"", "exif.year s.month = exif.month if exif.lat and exif.lon: if google is None: (s.city,s.state,s.country)", "); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\" self.pending = {} self.use_pending =", "= self.conn.cursor() sql = \"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql) rows =", "parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key to use Google Maps reverse geo-code", "[os.path.join(d,filename) for d, _, files in os.walk(root) for filename in files if lfileext(filename)", "= [0]*128 for ch in args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc):", "if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc =", "and (current >= level): if show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys())", "else: self.fd = None except: self.fd = None def __iter__(self): start = datetime.datetime.now()", "[0]*128 for ch in args.order: oc[ord(ch)] += 1 for (i,cc) in enumerate(oc): if", "self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) =", "or show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached", "and exif.lon: if google is None: (s.city,s.state,s.country) = GeoCoderOpenStreetmap(exif.lat,exif.lon).loc else: (s.city,s.state,s.country) = GeoCoderGoogle(exif.lat,exif.lon,google).loc", "return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT * FROM", "def isLeaf(self): return len(self.children)==0 def add(self,k): if k is None: return self if", "x elapsed = datetime.datetime.now() - start if i+1 == self.sz: # 
last one", "s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname)", "if (num_children < minimum) and (n.size() < minimum) and (current >= level): if", "self.country = None self.state = None self.city = None if pathname: unix =", "isinstance(default,int): return int(val) else: return val except: return default def main(): try: #", "if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year =", "try: val = os.environ[key] if isinstance(default,bool): return val.lower() == 'true' elif isinstance(default,int): return", "new_node.value def collapse(self,level,minimum,current,show_collapse): num_children = len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size()", "key in d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r", "minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator)", "in os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd)", "import requests import json import unidecode import argparse class Storage: def __init__(self, pathname):", "dump(self): cursor = self.conn.cursor() sql = \"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql)", "= lst self.sz = len(self.lst) try: if fd.isatty(): self.fd = fd else: self.fd", "Exception as e: print str(e) def env(key,default): try: val = os.environ[key] if isinstance(default,bool):", "self.year = None self.month = None self.country = None self.state = None self.city", "containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show cached (previous) elements in directory structure\") 
args", "self.lon = ExifData.degrees(gpsraw[4],gpsraw[3]=='W') else: self.lat = None self.lon = None @staticmethod def degrees(raw,neg):", "exif.month: s.year = exif.year s.month = exif.month if exif.lat and exif.lon: if google", "hash = ? ; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if", "chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len", "state, city, filename) VALUES (?, ?, ?, ?, ?, ?, ?); \"\"\" cursor.execute(sql,", "import Image import datetime import time import PIL.ExifTags import sqlite3 import requests import", "= {} for v, cached in self.value: rtrn[v] = os.path.basename(v) if full_path else", "else 0): raise RuntimeError(\"Invalid argument for --order. Must be permutation of 'YMCSL'\") #", "filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s))", "len(self.pending) > 0: cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month,", "float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd =", "parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory", "dd = Degrees + Minutes/60.0 + Seconds/3600.0 if neg: dd *= -1.0 return", "d, _, files in os.walk(root) for filename in files if lfileext(filename) in ('.jpg','.jpeg')]", "else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def", "= len(filter(lambda (v,c): c, self.value)) non_cached = 
len(self.value) - cached for k,n in", "sql = \"\"\" INSERT INTO storage(hash, year, month, country, state, city, filename) VALUES", "n.count_cached() cached += sub_cached non_cached += sub_non_cached return (cached, non_cached) def size(self): return", "GeoCoderGoogle.address_part(address,'locality') state = GeoCoderGoogle.address_part(address,'administrative_area_level_1') country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d", "(s.year, s.month, s.country, s.state, s.city) = row s.cached = True return s def", "f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files in os.walk(root) for filename", "except Exception as e: print str(e) def env(key,default): try: val = os.environ[key] if", "not None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime =", "rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz", "exif.year and exif.month: s.year = exif.year s.month = exif.month if exif.lat and exif.lon:", "except: self.fd = None def __iter__(self): start = datetime.datetime.now() last_len = 0 s=''", "create the args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number", "import shutil from PIL import Image import datetime import time import PIL.ExifTags import", "sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress: def __init__(self,lst,fd): self.lst", "results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results)", 
"entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory", "permutation of 'YMCSL'. Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify", "except Exception as e: #print \"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s)", "n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix = ' ' *", "= 0 s='' for (i,x) in enumerate(self.lst): yield x elapsed = datetime.datetime.now() -", "def __contains__(self,h): s = self[h] return s is not None def get(self,pathname): h", "cached in self.value: if not cached or show_cached: print prefix+os.path.basename(v) for k,n in", "in files if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress:", "del self.children[k] def dict(self,full_path): rtrn = {} for v, cached in self.value: rtrn[v]", "= \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display)", "storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree = StorageTree.fromDirectory(root,sys.stdout,Cache(root),mode,google) if", "len(self.children) for k,n in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >=", "self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon): self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder):", "@staticmethod def address_part(address,key): for d in address: if key in d['types']: return d['long_name']", "class 
Progress: def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try: if fd.isatty():", "argument for --order. Must be permutation of 'YMCSL'\") # move the files move_files(args)", "( hash int PRIMARY KEY, year text, month text, country text, state text,", "in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year = None self.month", "def make_hash(self,pathname): mtime = os.path.getmtime(pathname) filename = os.path.basename(pathname) return hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor", "%d with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size() < minimum) and", "cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename)) self.conn.commit() #print \"insert \"+str(h) return", "address: if key in d['types']: return d['long_name'] return None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon):", "else '' for k,n in self.children.iteritems(): for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv)", "self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >= level-1): if show_collapse: print", "self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address: if key in d['types']: return", "in self.children.items(): n.collapse(level,minimum,current+1,show_collapse) if (n.size() < minimum) and (current >= level-1): if show_collapse:", "item(self,key): if key=='y': return self.year elif key=='m': return self.month elif key=='c': return self.country", "datetime.datetime.now() - start if i+1 == self.sz: # last one predict_display = \"", "neg: dd *= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db'))", "return rtrn def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz * level) if", "None try: state = 
unidecode.unidecode(address['state']) except: state = None try: country = unidecode.unidecode(address['country'])", "before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\")", "Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year s.month =", "self.loc = (None,None,None) class GeoCoderGoogle(GeoCoder): def __init__(self,lat,lon,key): import googlemaps super(GeoCoderGoogle,self).__init__(lat,lon) # Look up", "dry_run: try: shutil.move(k,dst) except Exception as e: print str(e) def env(key,default): try: val", "?, ?, ?)\") results.append(k) results.append(v.year) results.append(v.month) results.append(v.country) results.append(v.state) results.append(v.city) results.append(f) sql += \",\".join(sqlext)", "def __init__(self, pathname): self.year = None self.month = None self.country = None self.state", "len(self.lst) try: if fd.isatty(): self.fd = fd else: self.fd = None except: self.fd", "RuntimeError(\"Invalid argument for --order. Must be permutation of 'YMCSL'\") # move the files", "storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d))", "' ' * (Node.prefixsz * level) if self.value: for v, cached in self.value:", "return s def __contains__(self,h): s = self[h] return s is not None def", "def __init__(self,lst,fd): self.lst = lst self.sz = len(self.lst) try: if fd.isatty(): self.fd =", "API Key. 
Specify this key to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory", "collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing ') lfileext", "h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s", "s.country, s.state, s.city) = row s.cached = True return s def __contains__(self,h): s", "(current >= level): if show_collapse: print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n)", "None self.lon = None @staticmethod def degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator))", "val.lower() == 'true' elif isinstance(default,int): return int(val) else: return val except: return default", "progress = Progress(all_files,fd) for pathname in progress: s = cache.get(pathname) if cache else", "self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return s else: return", "Exception as e: #print \"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname]", "with %s\"%(k,current,n.children.keys()) n.flatten() if (num_children < minimum) and (n.size() < minimum) and (current", "NOT EXISTS storage ( hash int PRIMARY KEY, year text, month text, country", "hash int PRIMARY KEY, year text, month text, country text, state text, city", "os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files in os.walk(root) for filename in", "lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files in", "return s def put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h]", "len(self.value) + 
len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v", "= self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year, month, country, state, city,", "e import traceback tb = traceback.format_exc() print tb return -1 if __name__=='__main__': sys.exit(main())", "datetime import time import PIL.ExifTags import sqlite3 import requests import json import unidecode", "def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def", "fd.write('Processing ') lfileext = lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _,", "= [] def isLeaf(self): return len(self.children)==0 def add(self,k): if k is None: return", "Storage(pathname) try: im = Image.open(pathname) exif = ExifData(im._getexif()) if exif.year and exif.month: s.year", "def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google):", "sql = \"\"\" SELECT * FROM storage; \"\"\" cursor.execute(sql) rows = cursor.fetchall() print", "= node.add(v.item(m)) node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse)", "this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s class GeoCoder(object): def __init__(self,lat,lon):", "def count_cached(self): cached = 0 non_cached = 0 if self.value: cached = len(filter(lambda", "sub_non_cached) = n.count_cached() cached += sub_cached non_cached += sub_non_cached return 
(cached, non_cached) def", "self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len < last_len: self.fd.write('", "+ Seconds/3600.0 if neg: dd *= -1.0 return dd class Cache: def __init__(self,rootdir,use_pending=False):", "= (float(minutesNumerator) / float(minutesDenominator)) Seconds = (float(secondsNumerator) / float(secondsDenominator)) dd = Degrees +", "year, month, country, state, city, filename) VALUES (?, ?, ?, ?, ?, ?,", "remaining)\"%(rate,str(predict)[:7]) else: predict_display = '' s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len", "for (i,cc) in enumerate(oc): if cc > (1 if chr(i) in StorageTree.default_mode else", "for dk,dv in n.dict(full_path).iteritems(): rtrn[dk] = \"%s/%s\"%(k,dv) return rtrn def dump(self,show_cached=True,level=0): prefix =", "node.value.append((k,v.cached)) def dict(self,full_path): return self.head.dict(full_path) def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def", "= \"\"\" SELECT year, month, country, state, city FROM storage WHERE hash =", "\"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print", "directory structure without moving files\") parser.add_argument('--show-collapse',default=env('SHOW_COLLAPSE',False),action=\"store_true\",help=\"Display directory structure before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure.", "None class GeoCoderOpenStreetmap(GeoCoder): def __init__(self,lat,lon): super(GeoCoderOpenStreetmap,self).__init__(lat,lon) r = requests.get(\"https://nominatim.openstreetmap.org/reverse?format=json&lat=%f&lon=%f&zoom=18&addressdetails=1\"%(lat,lon)) nom = json.loads(r.text) address", "INSERT INTO storage(hash, year, month, country, state, city, filename) VALUES (?, ?, ?,", "self.year = 
\"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False def dict(self): return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city}", "elif key=='l': return self.city else: return None class Node: prefixsz = 8 def", "GeoCoderGoogle(exif.lat,exif.lon,google).loc #print \"%s: %s\"%(filename,s) except Exception as e: #print \"Exception %s: %s\" %(filename,str(e))", "PIL import Image import datetime import time import PIL.ExifTags import sqlite3 import requests", "hash((filename,mtime)) def __setitem__(self,h,(filename,s)): cursor = self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year,", "stores.iteritems(): node = self.head for m in mode: node = node.add(v.item(m)) node.value.append((k,v.cached)) def", "datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month = \"{:%m}\".format(dt) self.cached = False def dict(self): return", "directory structure\") args = parser.parse_args() # check the order args.order=args.order.lower() oc = [0]*128", "%s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s if cache: cache.flush()", "check the order args.order=args.order.lower() oc = [0]*128 for ch in args.order: oc[ord(ch)] +=", "= ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year s.month = exif.month if", "if k is None: return self if not k in self.children: self.children[k] =", "self.conn.cursor() sql = \"\"\" CREATE TABLE IF NOT EXISTS storage ( hash int", "non_cached += sub_non_cached return (cached, non_cached) def size(self): return len(self.value) + len(self.children) class", "if pathname: unix = os.path.getmtime(pathname) dt = datetime.datetime.utcfromtimestamp(unix) self.year = \"{:%Y}\".format(dt) self.month =", "def flush(self): if len(self.pending) > 0: cursor = self.conn.cursor() sql = \"INSERT INTO", "parser.parse_args() # check the order args.order=args.order.lower() oc = [0]*128 for ch in args.order:", "%s\"%d if 
not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as e: #print str(e) pass", "e: #print \"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] = s", "sub_non_cached return (cached, non_cached) def size(self): return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl'", "all_files = [os.path.join(d,filename) for d, _, files in os.walk(root) for filename in files", "parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate", "= len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else: last_len = this_len self.fd.flush()", "as e: print e import traceback tb = traceback.format_exc() print tb return -1", "sys import re import os import shutil from PIL import Image import datetime", "as e: #print \"Exception %s: %s\" %(filename,str(e)) pass if cache: cache.put(pathname,s) stores[pathname] =", "= None self.month = None if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat", "val except: return default def main(): try: # create the args list parser", "= os.path.basename(v) if full_path else '' for k,n in self.children.iteritems(): for dk,dv in", "= v self.value += n.value def flatten(self): new_node = Node() for k,v in", "n.count_cached() if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached", "in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k)", "= 0 non_cached = 0 if self.value: cached = len(filter(lambda (v,c): c, self.value))", "? 
; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if row is", "row s.cached = True return s def __contains__(self,h): s = self[h] return s", "def dump(self,show_cached=True,level=0): prefix = ' ' * (Node.prefixsz * level) if self.value: for", "country = None self.loc= (city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode,", "float(i)/elapsed.total_seconds() predict = datetime.timedelta(seconds = float(self.sz-i) / rate) predict_display = \" (%.2f fps,", "None except: self.fd = None def __iter__(self): start = datetime.datetime.now() last_len = 0", "ExifData(im._getexif()) if exif.year and exif.month: s.year = exif.year s.month = exif.month if exif.lat", "= chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len < last_len: self.fd.write(' '*(last_len-this_len)) else:", "put(self,pathname,s): h = self.make_hash(pathname) filename = os.path.basename(pathname) if self.use_pending: self.pending[h] = (filename,s) return", "k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run:", "return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for", "def __init__(self,rootdir,use_pending=False): self.conn = sqlite3.connect(os.path.join(rootdir,'.dps_storage.db')) cursor = self.conn.cursor() sql = \"\"\" CREATE TABLE", "v, cached in self.value: rtrn[v] = os.path.basename(v) if full_path else '' for k,n", "if cc > (1 if chr(i) in StorageTree.default_mode else 0): raise RuntimeError(\"Invalid argument", "\"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len <", "address = nom['address'] try: city = unidecode.unidecode(address['city']) except: city = None try: state", "\"\"\" cursor.execute(sql) rows = cursor.fetchall() print rows def 
flush(self): if len(self.pending) > 0:", "self.value = [] def isLeaf(self): return len(self.children)==0 def add(self,k): if k is None:", "non_cached) def size(self): return len(self.value) + len(self.children) class StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head", "state, city,filename) VALUES \" results = [] sqlext = [] for (k,(f,v)) in", "\"INSERT INTO storage(hash, year, month, country, state, city,filename) VALUES \" results = []", "__init__(self,exifraw): if 0x0132 in exifraw: self.year = exifraw[0x0132][:4] self.month = exifraw[0x0132][5:7] else: self.year", "degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes", "= \" in %s\"%(str(elapsed)[:7]) elif i > 2: rate = float(i)/elapsed.total_seconds() predict =", "not dry_run: try: shutil.move(k,dst) except Exception as e: print str(e) def env(key,default): try:", "StorageTree: default_mode='ymcsl' def __init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems(): node =", "degrees(raw,neg): ((degreesNumerator, degreesDenominator), (minutesNumerator, minutesDenominator), (secondsNumerator, secondsDenominator)) = raw Degrees = (float(degreesNumerator) /", "before collapsing\") parser.add_argument('--order',default=StorageTree.default_mode,help=\"Default directory structure. Must be permutation of 'YMCSL'. 
Y=Year; M=Month; C=Country;", "= (filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor() sql", "self.conn.commit() #print \"insert \"+str(h) return s def put(self,pathname,s): h = self.make_hash(pathname) filename =", "(city,state,country) def move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google,", "country = GeoCoderGoogle.address_part(address,'country') self.loc=(city,state,country) @staticmethod def address_part(address,key): for d in address: if key", "city FROM storage WHERE hash = ? ; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone()", "; \"\"\" cursor.execute(sql,(h,)) row = cursor.fetchone() #print \"found \"+str(row) if row is None:", "def dict(self,full_path): rtrn = {} for v, cached in self.value: rtrn[v] = os.path.basename(v)", "(n.size() < minimum) and (current >= level): if show_collapse: print \"merging %s at", "if lfileext(filename) in ('.jpg','.jpeg')] progress = Progress(all_files,fd) for pathname in progress: s =", "return int(val) else: return val except: return default def main(): try: # create", "if show_collapse: storage_tree.dump(show_cached) print 'collapsing {} levels at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached)", "Key. 
Specify this key to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing", "print \"merging %s at %d with %s\"%(k,current,n.children.keys()) self.merge(n) del self.children[k] def dict(self,full_path): rtrn", "'*(last_len-this_len)) else: last_len = this_len self.fd.flush() if self.fd: self.fd.write('\\n') self.fd.flush() else: print s", "s = Storage(None) (s.year, s.month, s.country, s.state, s.city) = row s.cached = True", "(sub_cached, sub_non_cached) = n.count_cached() cached += sub_cached non_cached += sub_non_cached return (cached, non_cached)", "(non_cached > 0) or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0", "text, city text, filename text ); \"\"\" cursor.execute(sql) self.conn.commit() #print \"created table storage\"", "#print \"found \"+str(row) if row is None: return None s = Storage(None) (s.year,", "self[h] return s is not None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h)", "/ rate) predict_display = \" (%.2f fps, %s remaining)\"%(rate,str(predict)[:7]) else: predict_display = ''", "print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached > 0)", "is not None def get(self,pathname): h = self.make_hash(pathname) return self.__getitem__(h) def make_hash(self,pathname): mtime", "import sqlite3 import requests import json import unidecode import argparse class Storage: def", "show_cached: print prefix+os.path.basename(v) for k,n in self.children.iteritems(): cached,non_cached = n.count_cached() if (non_cached >", "this key to use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to", "address with reverse geocoding gmaps = googlemaps.Client(key=key) reverse_geocode_result = gmaps.reverse_geocode((lat,lon)) address = reverse_geocode_result[0]['address_components']", 
"os.path.join(root,os.path.dirname(v))+\"/\" #print \"Making directory %s\"%d if not dry_run: try: os.makedirs(os.path.dirname(d)) except Exception as", "year, month, country, state, city,filename) VALUES \" results = [] sqlext = []", "is DropBoxPhotoSorter\" #cache_.dump() return 0 except Exception as e: print e import traceback", "secondsDenominator)) = raw Degrees = (float(degreesNumerator) / float(degreesDenominator)) Minutes = (float(minutesNumerator) / float(minutesDenominator))", "return None s = Storage(None) (s.year, s.month, s.country, s.state, s.city) = row s.cached", "= Progress(all_files,fd) for pathname in progress: s = cache.get(pathname) if cache else None", "sql += \",\".join(sqlext) sql += \";\" cursor.execute(sql,results) self.conn.commit() self.pending = {} class Progress:", "move_files(args): (root, storage_levels, storage_min, show_collapse, dry_run, mode, google, show_cached) = (args.directory,args.storage_levels,args.storage_min,args.show_collapse,args.dry_run,args.order,args.google, args.show_cached) storage_tree", "at least {} entries'.format(storage_levels,storage_min) storage_tree.collapse(storage_levels,storage_min,show_collapse) storage_tree.dump(show_cached) for k,v in storage_tree.dict(True).iteritems(): d = os.path.join(root,os.path.dirname(v))+\"/\"", "s = \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if", "Node: prefixsz = 8 def __init__(self): self.children={} self.value = [] def isLeaf(self): return", "else None if s is None: s = Storage(pathname) try: im = Image.open(pathname)", "Must be permutation of 'YMCSL'. 
Y=Year; M=Month; C=Country; S=State; L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API", "lambda f: os.path.splitext(f)[1].lower() all_files = [os.path.join(d,filename) for d, _, files in os.walk(root) for", "?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city, filename))", "use Google Maps reverse geo-code service\") parser.add_argument('directory',help=\"Directory containing photos to be rearranged\") parser.add_argument('--show-cached',default=env('SHOW_CACHED',False),action=\"store_true\",help=\"Show", "self.value)) non_cached = len(self.value) - cached for k,n in self.children.iteritems(): (sub_cached, sub_non_cached) =", "cursor = self.conn.cursor() sql = \"\"\" INSERT INTO storage(hash, year, month, country, state,", "nom = json.loads(r.text) address = nom['address'] try: city = unidecode.unidecode(address['city']) except: city =", "self.pending[h] = (filename,s) return s else: return self.__setitem__(h,(filename,s)) def dump(self): cursor = self.conn.cursor()", "def dump(self,show_cached=True): self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if", "k,v in stores.iteritems(): node = self.head for m in mode: node = node.add(v.item(m))", "None self.month = None if 0x8825 in exifraw: gpsraw = exifraw[0x8825] self.lat =", "int PRIMARY KEY, year text, month text, country text, state text, city text,", "self.head.dump(show_cached) def collapse(self,level,minimum,show_collapse): self.head.collapse(level,minimum,0,show_collapse) @classmethod def fromDirectory(cls,root,fd,cache,mode,google): stores = {} if fd: fd.write('Processing", "> 0: cursor = self.conn.cursor() sql = \"INSERT INTO storage(hash, year, month, country,", "Node() return self.children[k] def merge(self,n): for k,v in n.children.iteritems(): if k in 
self.children:", "= argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before", "add(self,k): if k is None: return self if not k in self.children: self.children[k]", "lst self.sz = len(self.lst) try: if fd.isatty(): self.fd = fd else: self.fd =", "subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items in subdirectory before collapsing\") parser.add_argument('--dry-run',default=env('DRY_RUN',False),action=\"store_true\",help=\"Calculate directory structure without", "* (Node.prefixsz * level) if self.value: for v, cached in self.value: if not", "L=Locality/City\") parser.add_argument('--google',default=env('GOOGLE_API_KEY',None),help=\"Google Maps API Key. Specify this key to use Google Maps reverse", "?, ?, ?, ?, ?); \"\"\" cursor.execute(sql, (h, s.year, s.month, s.country, s.state, s.city,", "PIL.ExifTags import sqlite3 import requests import json import unidecode import argparse class Storage:", "= [] for (k,(f,v)) in self.pending.iteritems(): sqlext.append(\"(?, ?, ?, ?, ?, ?, ?)\")", "args list parser = argparse.ArgumentParser() parser.add_argument('--storage-levels',type=int,default=env('STORAGE_LEVELS',2),help=\"Minimum number of subdirectories\") parser.add_argument('--storage-min',type=int,default=env('STORAGE_MIN',4),help=\"Minimum number of items", "None class Node: prefixsz = 8 def __init__(self): self.children={} self.value = [] def", "elements in directory structure\") args = parser.parse_args() # check the order args.order=args.order.lower() oc", "return {'year':self.year,'month':self.month,'country':self.country,'state':self.state,'city':self.city} def __str__(self): return str(self.dict()) def __getitem__(self,key): return self.dict()[key] def item(self,key): if", 
"or show_cached: print \"%s%s/\"%(prefix,k) n.dump(show_cached,level+1) def count_cached(self): cached = 0 non_cached = 0", "INTO storage(hash, year, month, country, state, city,filename) VALUES \" results = [] sqlext", "s def __contains__(self,h): s = self[h] return s is not None def get(self,pathname):", "= \"%d/%d%s\"%(i+1,self.sz,predict_display) if self.fd: back = chr(8)*last_len self.fd.write(back+s) this_len = len(s) if this_len", "__init__(self,stores,mode=default_mode): self.head = Node() for k,v in stores.iteritems(): node = self.head for m" ]
[ "3}') for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "= self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status']) == 1 assert (", "self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same", "self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}'", "def test_none_org(self): # check users w/ none of the above org self.test_user =", "= db.session.merge(self.test_user) # Check status during baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated)", "world questionnaire banks :param eproms_or_tnth: controls which set of questionnairebanks are generated. 
As", "db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id", "expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur", "'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add',", "from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be looking", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish", "from dateutil.relativedelta import relativedelta from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import", "from portal.models.intervention import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import OverallStatus from", "from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import Organization from", "3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db):", "user w/ same org association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name", "aggregate_responses( instrument_ids=[instrument_id], 
current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 ==", "now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have", "name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments):", "for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq)", "db.session.commit() enc = db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd =", "in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline", "if not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp", "( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol", "INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for", "backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/", "# metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not", "in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s =", "series of near real world 
questionnaire banks :param eproms_or_tnth: controls which set of", "status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s =", "def test_initial_recur_baseline_done(self): # backdate to be within the first recurrence window backdate, nowish", "0 def test_2nd_recur_due(self): # backdate so baseline q's have expired, and we within", "intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments): q", "banks :param eproms_or_tnth: controls which set of questionnairebanks are generated. As restrictions exist,", "# should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however,", "you message. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') #", "session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id,", "{'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on time self.bless_with_basics(", "started, should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'}", "set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed something on", "Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\":", "== 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( 
instrument_ids=[instrument_id], current_user=staff) expected", "set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on", "the thank you message. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate,", "def test_metastatic_as_of_date(self): # backdating consent beyond expired and the status lookup date #", "subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur =", "metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user)", "recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user", "'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a", "TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a", "db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur =", "the user completed something on time, and nothing else # is due, should", 
"{'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments", "- relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline,", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self):", "as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only epic26 started, should see results", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm", "\"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"}", "metastatic_protocol.id # Define test Orgs and QuestionnaireBanks for each group localized_org = Organization(name='localized')", "setdate=backdate, local_metastatic='localized') # provide treatment date outside of all recurrences tx_date = datetime(2000,", "inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but", "timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date)", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s", 
"backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user)", "termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}')", "in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring", "for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq)", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb =", "ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol", "= { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc',", "import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE from portal.models.user import", "org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback,", "6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\":", "# Metastatic recurring 6 mr6_qb = 
QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}',", "status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb)", "can't have the same instrument, it doesn't work to mix them. \"\"\" if", "assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6,", "invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi',", "status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "assert a_s.overall_status == OverallStatus.in_progress # with only epic26 started, should see results for", "assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so", "have the same instrument, it doesn't work to mix them. 
\"\"\" if eproms_or_tnth", "but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "= QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished both", "start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy false", "'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments", "metapro_id = metastatic_protocol.id # Define test Orgs and QuestionnaireBanks for each group localized_org", "recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user", "consent beyond expired and the status lookup date # within a valid window", "OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def", "db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'} found =", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate(", "one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', 
timestamp=now) self.test_user = db.session.merge(self.test_user) a_s", "expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`, not", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish =", "== 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary',", "backdate so baseline q's have expired, and we within the first # recurrence", "# recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic')", "timestamp) qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id,", "test_localized_as_of_date(self): # backdating consent beyond expired and the status lookup date # within", "toggle default setup - set biopsy false for test user self.login() self.test_user =", "setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class", "mix them. 
\"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth':", "== 0 def test_2nd_recur_due(self): # backdate so baseline q's have expired, and we", "a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's have expired, and", "not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic',", "hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0, 3, 6,", "for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user)", "result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include", "Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE from portal.models.user import get_user", "enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6", "({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent", "def test_site_ids(self): # bless org w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system", "a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial recurrence show due. 
a_s =", ") for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "_ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument,", "{ 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = {", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so", "expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'} found = [i['timepoint'] for", "Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management", "expected def test_site_ids(self): # bless org w/ expected identifier type wanted_system = 'http://pcctc.org/'", "setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "= associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as", "self.test_user = db.session.merge(self.test_user) # Check status during baseline window a_s_baseline = QB_Status( user=self.test_user,", "second recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic')", "assert not 
a_s.instruments_in_progress() # metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments ==", "33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for", "classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in", "self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s =", "\"current\" status for the initial recurrence show due. a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb,", "= Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name in", "as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list of expected intruments needing attention", "a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired and the status lookup date", "Audit from portal.models.clinical_constants import CC from portal.models.encounter import Encounter from portal.models.identifier import Identifier", "local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def", "else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define", "7}', expired='{\"months\": 3}') for rank, instrument in 
enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq =", "== OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments", "ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id,", "backdating consent beyond expired and the status lookup date # within a valid", "if submitted nearly 3 months ago, during # baseline window backdated = nowish", "range(10)) timestamp = timestamp or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted # should include", "period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "`tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol')", "now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside of all", "and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so", "instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic", "User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now)", "from 
https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}')", "portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol", "'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def", "local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same org association", "mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s", "nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id)", "with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic", "to be within the first recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "we are within the # second recurrence window backdate, nowish = associative_backdate( now=now,", "3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}',", "= db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month", "nowish = associative_backdate( now=now, backdate=relativedelta(months=6, 
hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first()", "= QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit()", "now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in", "value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert not qstats.current_qbd() assert", "as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not", "a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized')", "instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): #", "def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "various qb iterations, confirm # time points. 
nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9,", "assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished one, time remains for", "not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate,", "def test_localized_overdue(self): # if the user completed something on time, and nothing else", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all", "{ \"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} }", "self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats =", "iterations, confirm # time points. nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics(", "instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider']))", "the initial recurrence show due. 
a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due", "test_initial_recur_baseline_done(self): # backdate to be within the first recurrence window backdate, nowish =", "== str(OverallStatus.expired)) def test_none_org(self): # check users w/ none of the above org", "== OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment and instruments_in_progress # should", "self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses(", "\"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\",", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial window w/", "recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration == 0", "6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff", "submitted # should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 #", "def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few", "for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now)", "'/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code 
== 200 assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] ==", "test Orgs and QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org =", "termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))):", "rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur],", "submitted # should include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) #", "role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month", "hasn't taken, but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s", "as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for", "overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument)", "above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user)", "self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status']) == 1 assert ( 
response.json['status'][0]['consents'][0]['assessment_status']", "backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress',", "assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert", "== 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1", "provide treatment date outside of all recurrences tx_date = datetime(2000, 3, 12, 0,", "OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate,", "needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should", "= Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2)", "# backdate so baseline q's have expired, and we within the 2nd #", "assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier']))", "'eproms' # modify in child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp()", "27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring',", "portal.models.overall_status import 
OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire", "termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank(", "near real world questionnaire banks :param eproms_or_tnth: controls which set of questionnairebanks are", "in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb)", "setup\" eproms_or_tnth = 'eproms' # modify in child class to test `tnth` def", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\":", "== a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's have expired,", "3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user =", "# from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\":", "db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id", "metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "self.bless_with_basics() response = self.client.get( 
'/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status']) == 1", "test assessment_status\"\"\" from __future__ import unicode_literals # isort:skip from datetime import datetime from", "QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument", "the initial window w/ no questionnaires submitted # should include all from initial", "assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration == 0 def", "# backdate to be within the first recurrence window backdate, nowish = associative_backdate(", "initial recurrence show due. a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due #", "self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock qr's from", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all q's expired, #", "import ROLE from portal.models.user import get_user from portal.system_uri import ICHOM from tests import", "'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result =", "have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate +", "( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check users w/ none of the", "the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() 
self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user =", "timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed #", "recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\":", "metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline q's", "backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "= aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'}", "for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq)", "now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6',", "TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb =", "test_metastatic_due(self): # hasn't taken, but 
still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user =", "(set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self):", "all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response", "be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent", "associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add", "should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self):", "window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user =", "treatment date outside of all recurrences tx_date = datetime(2000, 3, 12, 0, 0,", "each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\", "date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy false for", "name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in", "first # recurrence window backdate, nowish = associative_backdate( now=now, 
backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate,", "should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/", "{'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world", "now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if submitted", "with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self):", "from tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed',", "# instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): #", "db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0]", "3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name)", "datetime import datetime from random import choice from string import ascii_letters from dateutil.relativedelta", "both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( 
instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress',", "be within the first recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2))", "= db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now)", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur(", "= QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas", "subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert not qstats.current_qbd() assert not qstats.enrolled_in_classification(\"baseline\")", "test_localized_in_process(self): # User finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now)", "{ 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'}", "baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}')", 
"portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import", "\"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a biopsy", "we within the first # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "Define test Orgs and QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org", "self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy false for test user self.login()", "self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite')", "days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user", "now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "expired and the status lookup date # within a valid window should show", "a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial", "a series of near real world questionnaire banks :param eproms_or_tnth: controls which set", "== a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired and the status lookup", "relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for 
instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated)", "\"\"\"Create a series of near real world questionnaire banks :param eproms_or_tnth: controls which", "# should include all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self):", "identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value =", "assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check", "mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}',", "# with all q's expired, # instruments_needing_full_assessment and instruments_in_progress # should be empty", "metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc',", "local_metastatic='metastatic') # add baseline QNRs, as if submitted nearly 3 months ago, during", "mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}')", "import db from portal.models.audit import Audit from portal.models.clinical_constants import CC from portal.models.encounter import", "mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s", "auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb:", "3}', termination='{\"months\": 27}') with 
SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb =", "qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id,", "system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring',", "questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy false for test user", "instruments_needing_full_assessment and instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress()", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "questionnairebanks are generated. 
As restrictions exist, such as two QBs with the same", "def test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate, nowish = associative_backdate( now=now,", "assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's", "response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status']) == 1 assert", "staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle", "bundle['entry']] assert set(found) == expected def test_site_ids(self): # bless org w/ expected identifier", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments:", "so the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user)", "ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id", "1}', termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4,", "= db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, 
questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result ==", "= aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1", "start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb: qstats =", "assert response.status_code == 200 assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired))", "Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import OverallStatus", "== 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check users w/", "map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur)", "OverallStatus.completed # shouldn't need full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not", "qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and", "beyond expired and the status lookup date # within a valid window should", "'Month 3', 'Month 6', 'Month 9'} found = [i['timepoint'] for i in bundle['entry']]", "assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the", "= 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org =", "from portal.models.research_protocol import 
ResearchProtocol from portal.models.role import ROLE from portal.models.user import get_user from", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\"", "mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db):", "a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) ==", "= {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}", "looking at iteration 2 (zero index)! assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): #", "self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline',", "confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def", "a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to be within the first recurrence", "mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\":", "backdated = nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in", "instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = 
QB_Status(user=self.test_user, as_of_date=nowish) assert", "import get_user from portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate,", "import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol import", "'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4", "# Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}')", "confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self):", "due. a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial", "a valid window should show available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3))", "{ 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23',", "a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s", "metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate", "import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest import SessionScope import pytest from", "'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected =", "portal.models.user import get_user from portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate", "qb: qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration", "import datetime from random import choice from string import ascii_letters from dateutil.relativedelta import", "from various qb iterations, confirm # time points. 
nineback, nowish = associative_backdate( now=now,", "baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline')", "= associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's", "questionnaires submitted # should include all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4", "# User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments:", "setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress',", "3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\":", "classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments):", "= ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol)", "= datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date)", "qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id,", "expired='{\"months\": 3}') for rank, instrument in 
enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id", "of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user", "class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms'", "if the user completed something on time, and nothing else # is due,", "baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s =", "backdate so the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user =", "CC from portal.models.encounter import Encounter from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION", "== bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress',", "4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\":", "association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value)", "status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + 
relativedelta(days=2) a_s =", "import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions import db from portal.models.audit import", "status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in", "self.login() # backdate outside of baseline window (which uses consent date) backdate, nowish", "invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from", "db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status ==", "start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments): q =", "backdate so baseline q's have expired, and we are within the # second", "QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished both on", "INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] )", "assert a_s.overall_status == OverallStatus.due # confirm list of expected intruments needing attention assert", "name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q", "_ in range(10)) timestamp = 
timestamp or datetime.utcnow() qr_document = { \"questionnaire\": {", "portal.models.role import ROLE from portal.models.user import get_user from portal.system_uri import ICHOM from tests", "rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}',", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial", "with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id),", "w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system]", "expired\" self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user", "instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db):", "== OverallStatus.due # confirm list of expected intruments needing attention assert (metastatic_baseline_instruments ==", "recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user", "instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline", "Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*(", "assert (1 == 
len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0]", "'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the", "self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0, 3, 6, 9):", "indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert", "== OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self):", "relativedelta from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb)", "staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert 1", "def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id(", "Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = 
db.session.merge(enc) if", "assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate", "in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside of all recurrences tx_date =", "backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if submitted nearly", "db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id =", "= Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit()", "still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now)", "def test_secondary_recur_due(self): # backdate so baseline q's have expired, and we are within", "id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish", "not qb: qstats = QB_Status(get_user(user_id), timestamp) 
qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank,", "return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or", "overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq", "db from portal.models.audit import Audit from portal.models.clinical_constants import CC from portal.models.encounter import Encounter", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate", "assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed something on time,", "mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "(metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed", "with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = {", "submitted nearly 3 months ago, during # baseline window backdated = nowish -", "in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker", "\"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s =", "as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate 
instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all'))", "assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired and the status", "mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\":", "include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should", "unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name", "= db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb,", "timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial window w/ no questionnaires", "however, we should be looking at iteration 2 (zero index)! 
assert a_s.current_qbd().iteration ==", "self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument,", "test_site_ids(self): # bless org w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system =", "['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first()", "metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite')", "nothing else # is due, should see the thank you message. backdate, nowish", "the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s", "instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog',", "baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date =", "mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish,", "for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final',", "import Audit from portal.models.clinical_constants import CC from portal.models.encounter import Encounter 
from portal.models.identifier import", "mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during baseline window a_s_baseline", "== OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of baseline window (which uses", "= QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument", "import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import", "no questionnaires submitted # should include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) ==", "assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check users w/ none of", "mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`,", "questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first()", "doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a biopsy (i.e. 
event", "portal.models.encounter import Encounter from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization", "00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "== 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting", "local_metastatic='localized') # backdate so the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate)", "for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq)", "expired, and we within the 2nd # recurrence window backdate, nowish = associative_backdate(", "codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert not qstats.current_qbd()", "0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq =", "nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s", "creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff)", "assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def 
test_localized_overdue(self): # if the user", "classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in", "= 'eproms' # modify in child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup,", "# add baseline QNRs, as if submitted nearly 3 months ago, during #", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish =", "portal.models.clinical_constants import CC from portal.models.encounter import Encounter from portal.models.identifier import Identifier from portal.models.intervention", "sqlalchemy.orm.exc import NoResultFound from portal.extensions import db from portal.models.audit import Audit from portal.models.clinical_constants", "\"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc", "authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add',", "\"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with", "for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s =", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert 
a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self):", "Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\":", "a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's have expired, and we within", "== OverallStatus.due # w/ no questionnaires submitted # should include all from second", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the", "recurrences tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy',", "time, and nothing else # is due, should see the thank you message.", "enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence", "now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "should be looking at iteration 2 (zero index)! 
assert a_s.current_qbd().iteration == 1 def", "'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 =", "= associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same org", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user)", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs,", "classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify in child class", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring',", "db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org))", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of baseline", "mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\":", "start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q =", "`eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol", "assert 
(id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr(", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now,", "datetime from random import choice from string import ascii_letters from dateutil.relativedelta import relativedelta", "local_metastatic='metastatic') # backdate so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate)", "a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate so baseline q's have expired, and", "== set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should also be 'due' assert", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic',", "# Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}',", "rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur],", "default setup - set biopsy false for test user self.login() self.test_user = db.session.merge(self.test_user)", "q's have expired, and we are within the # second recurrence window backdate,", "portal.models.audit import Audit from portal.models.clinical_constants import CC from portal.models.encounter import Encounter from portal.models.identifier", "needing mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify in child class to", "hours=-1)) 
self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}',", "associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth =", "with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a biopsy (i.e.", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self):", "assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user =", "'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = {", "current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert", "0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate so baseline q's have", "within the first recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics(", "rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\":", "the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date", "to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): #", "\"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session ID\",", "six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\":", "QuestionnaireBank( name='symptom_tracker', classification='baseline', 
intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument in", "== OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics(", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress',", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests", "def test_metastatic_on_time(self): # User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired mock_qr( instrument_id='epic26',", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s =", "OverallStatus.due # in the initial window w/ no questionnaires submitted # should include", "= map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur =", "instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank(", "db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): 
for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment", "= qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp,", "metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}',", "instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized',", "document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26',", "# within a valid window should show available assessments. 
backdate, nowish = associative_backdate(", "OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s =", "results for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment()", "full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): #", "2 (zero index)! assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to be", "window w/ no questionnaires submitted # should include all from initial recur assert", "rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) #", "status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed", "in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite", "== OverallStatus.due # in the initial window w/ no questionnaires submitted # should", "message. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate", "cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3,", "SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id,", "subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb =", "# User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now)", "or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME',", "assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) ==", "rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) #", "a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished one, time remains", "see results for both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment()))", "status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s 
= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed", "[i['timepoint'] for i in bundle['entry']] assert set(found) == expected def test_site_ids(self): # bless", "backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside of all recurrences", "name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in", "backdate so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user =", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now,", "during # baseline window backdated = nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by(", "user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status", "= 'eortc' for months_back in (0, 3, 6, 9): backdate, _ = associative_backdate(", "metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'}", "instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for", "uses consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) 
self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "qr_document = { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\":", "should show available assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') #", "doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user,", "a few mock qr's from various qb iterations, confirm # time points. nineback,", "the same classification can't have the same instrument, it doesn't work to mix", "the 2nd # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics(", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit()", "= {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real", "Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db):", "a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include", "= Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 
1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert", "== 200 assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self):", "{ \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\":", "bless org w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS']", "QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank,", "QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur", "appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): #", "within the # second recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1))", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate(", "a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1))", "test_no_start_date(self): # W/O a biopsy (i.e. 
event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) #", "portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id,", "a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of baseline window (which", "biopsy (i.e. event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup -", "assert a_s.overall_status == OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment and instruments_in_progress", "from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank,", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline", "assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate so baseline q's have expired,", "db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc',", "recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') #", "should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): #", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment and", "0}', expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q = 
Questionnaire.find_by_name(name=instrument) qbq", "with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name", "Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate(", "have expired, and we are within the # second recurrence window backdate, nowish", "datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user", "not a_s.instruments_in_progress() # metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite')))", "OverallStatus.due # confirm list of expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment()))", "def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic')", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank(", "doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert", "# is due, should see the thank you message. 
backdate, nowish = associative_backdate(", "nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument,", "start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}',", "mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login()", "setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from", "== OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate, nowish =", "st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank,", "== set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed something", "# should be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): #", "0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments): q = 
Questionnaire.find_by_name(name=instrument)", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self):", "self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now)", "window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user =", "instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s", "Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}',", "in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during baseline", "found = [i['timepoint'] for i in bundle['entry']] assert set(found) == expected def test_site_ids(self):", "db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}',", "list of expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress()", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb", "now=now, backdate=relativedelta(months=9, hours=1)) 
self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0,", "from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions import", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired,", "instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same org association for", "and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert", "assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired", "instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic", "set(found) == expected def test_site_ids(self): # bless org w/ expected identifier type wanted_system", "w/ no questionnaires submitted # should include all from initial recur assert set(a_s.instruments_needing_full_assessment())", "indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite')", "qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr", "a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now)", "== OverallStatus.in_progress # 
with only epic26 started, should see results for both #", "= qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11' def test_qnr_id_missing(self):", "# Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}')", "overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq", "QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a biopsy (i.e. event start", "from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response", "= { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": {", "\"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\":", "initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration ==", "status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None,", "not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired and", "# should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self):", "0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in 
enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument)", "== set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired", "associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same org association", "# Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}',", "backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "# Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id,", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list of expected", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted #", "and nothing else # is due, should see the thank you message. backdate,", "a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted # should include all from", "30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq =", "timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress", "mock qr's from various qb iterations, confirm # time points. 
nineback, nowish =", "have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate +", "nearly 3 months ago, during # baseline window backdated = nowish - relativedelta(months=2,", "eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O a biopsy (i.e. event start date),", "to test assessment_status\"\"\" from __future__ import unicode_literals # isort:skip from datetime import datetime", "the first recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate,", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate,", "import ResearchProtocol from portal.models.role import ROLE from portal.models.user import get_user from portal.system_uri import", "of baseline window (which uses consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4,", "window should show available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate,", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank(", "for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def", "of expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() #", "include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate", "(symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker',", "q's expired, # instruments_needing_full_assessment and instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment()", "backdate so baseline q's have expired, and we within the 2nd # recurrence", "= QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank,", "ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp)", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate,", "instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = 
QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "w/ no questionnaires submitted # should include all from initial recur assert (set(a_s.instruments_needing_full_assessment())", "def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world questionnaire banks :param eproms_or_tnth:", "not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments", "[wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier(", "expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value", "hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "test_localized_inprogress_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add',", "assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still in OverallStatus.due period", "hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user", "expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "def test_initial_recur_due(self): # backdate so baseline q's have expired, and we within the", 
"self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if submitted nearly 3 months", "from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response =", "hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired mock_qr(", "== OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics(", "eproms_or_tnth = 'eproms' # modify in child class to test `tnth` def setUp(self):", "from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE from", "w/ none of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic',", "= INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur]", "Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\":", "'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value)", "Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with", "qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, 
status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb", "\"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with", "status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments =", "db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11'", "# backdating consent beyond expired and the status lookup date # within a", "not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished one, time", "= 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same org association for bundle", "as two QBs with the same classification can't have the same instrument, it", "self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due", "a_s.overall_status == OverallStatus.in_progress # with only epic26 started, should see results for both", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial window w/ no", "self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "== metastatic_3 # however, we should be looking at iteration 2 (zero index)!", "window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert 
a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment()", "from sqlalchemy.orm.exc import NoResultFound from portal.extensions import db from portal.models.audit import Audit from", "in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring", "portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status", "= db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) ==", "research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument)", "= nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments:", "instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during", "User finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user =", "assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should also be", "status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for", "self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = 
db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for", "metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org", "org association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff,", "event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy", "'Month 6', 'Month 9'} found = [i['timepoint'] for i in bundle['entry']] assert set(found)", "QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments):", "time points. 
nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id", "as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user)", "instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): #", "eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol')", "= associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) #", "set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's have expired, and", "def test_localized_in_process(self): # User finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add',", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish =", "+ relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the", "self.test_user = db.session.merge(self.test_user) 
a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't", "questionnaire setup\" eproms_or_tnth = 'eproms' # modify in child class to test `tnth`", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self):", "status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress", "user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats", "localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur)", "OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment and instruments_in_progress # should be", "st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\":", "indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank,", "= Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish =", "show available assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate", "should show available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "- set biopsy false for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY,", "status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user)", "a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'})", "second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get(", "time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s", "setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user)", "enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb", "db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur =", "as if submitted nearly 3 months ago, during # baseline window backdated =", "doesn't work to mix them. 
\"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif", "timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during baseline window a_s_baseline = QB_Status(", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list of expected intruments", "self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument", "qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress')", "not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert", "mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id =", "baseline q's have expired, and we are within the # second recurrence window", "assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world questionnaire", "subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\"", "a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = 
db.session.merge(self.test_user) # confirm", "== metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's have expired, and we", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments", "generate a few mock qr's from various qb iterations, confirm # time points.", "test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate", "metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org =", "SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id", "questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb", "SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire setup\"", "mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur])", "mock_qr(instrument_id=instrument_id) # add staff user w/ same org association for bundle creation staff", "qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result 
=", "OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished", "not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now)", "def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with", "def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management =", "all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so", "rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}',", "doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp or datetime.utcnow() qr_document", "db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur", "mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] ==", "submitted # should include all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def", "bundle = aggregate_responses( instrument_ids=[instrument_id], 
current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert", "'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = {", "== len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class", "else # is due, should see the thank you message. backdate, nowish =", "TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map(", "mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq =", "for months_back in (0, 3, 6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back))", "# confirm list of expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert", "generated. 
As restrictions exist, such as two QBs with the same classification can't", "QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import", "test_all_expired_old_tx(self): self.login() # backdate outside of baseline window (which uses consent date) backdate,", "class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock qr's from various qb", "not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed something on time, and", "OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate,", "= associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back", "assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in", "6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\":", "import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import (", "# Check status during baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status", "Metastatic recurring 6 mr6_qb = QuestionnaireBank( 
name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\":", "bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6', 'Month", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in", "finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user)", "hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside of all recurrences tx_date", "qb = db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress')", "'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org = Organization.query.filter(", "# backdate so the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user", "setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if submitted nearly 3 months ago,", "name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument", "set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code", "QuestionnaireBank.query.first() qb = db.session.merge(qb) with 
pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None,", "test_none_org(self): # check users w/ none of the above org self.test_user = db.session.merge(self.test_user)", "== 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth))", "of questionnairebanks are generated. As restrictions exist, such as two QBs with the", "0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s =", "instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic", "db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should", "User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i,", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user)", "= db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def", "setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def", "termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', 
termination='{\"months\": 33}') six_q_recur =", "expired, and we are within the # second recurrence window backdate, nowish =", "be expired\" self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "for both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26']", "== OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics(", "mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for", "= QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for", "super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock qr's", "# instruments_needing_full_assessment and instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment() assert not", "setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock", "so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user)", "should be expired\" self.login() backdate, nowish = associative_backdate( now=now, 
backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate,", "assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list of", "backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "= db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status", "def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user)", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline", "self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1)", "Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}',", "# w/ no questionnaires submitted # should include all from second recur assert", "metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200", "associative_backdate( now=now, backdate=relativedelta(months=3, 
days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if", "start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument)", "a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1))", "value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback,", "cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment", "- set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both", "recurrence show due. 
a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in", "days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add baseline QNRs, as if submitted nearly 3", "a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired and the", "outside of baseline window (which uses consent date) backdate, nowish = associative_backdate( now=now,", "= { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi',", "INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status", "# bless org w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/'", "completed something on time, and nothing else # is due, should see the", "should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now)", "OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate, nowish = associative_backdate(", "assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of baseline window", "hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user", "on time, and nothing else # is due, should see the thank you", "QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments -", "# provide treatment date outside of all recurrences tx_date = 
datetime(2000, 3, 12,", "window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') # add", "setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb,", "QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import (", "st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock", "include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0", "assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's have expired,", "QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only epic26 started, should see", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm", "tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None,", "(set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now)", "backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same org association for bundle", "0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, 
instrument in enumerate(metastatic_3): q =", "within the 2nd # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1))", "during baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert", "ago, during # baseline window backdated = nowish - relativedelta(months=2, days=25) baseline =", "aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'} found", "no questionnaires submitted # should include all from second recur assert set(a_s.instruments_needing_full_assessment()) ==", "doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp or datetime.utcnow()", "\"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit()", "QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID,", "(localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished", "doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user", "# generate a few mock qr's from various qb iterations, confirm # time", "200 assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): #", "nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) 
self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s", "https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur", "not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial recurrence show due. a_s", "biopsy false for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID,", "confirm # time points. nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback,", "appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on time", "test_localized_overdue(self): # if the user completed something on time, and nothing else #", "as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress", "the # second recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics(", "current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6', 'Month 9'} found = [i['timepoint']", "research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4):", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite',", "self.bless_with_basics(setdate=backdate, 
local_metastatic='metastatic') # backdate so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23',", "db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments):", "now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3):", "# confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self):", "= db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self):", "now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "should include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration", "rank, instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) #", "taken, but still in 
OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s =", "show due. a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the", ":param eproms_or_tnth: controls which set of questionnairebanks are generated. As restrictions exist, such", "db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert", "baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}')", "a biopsy (i.e. event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert not", "instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished", "associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "\"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter(", "set of questionnairebanks are generated. 
As restrictions exist, such as two QBs with", ") from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur", "setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due", "Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT", "on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user)", "9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user", "questionnaires submitted # should include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3)", "mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "and QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol)", "QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user =", "q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', 
timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate", "choice from string import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest import SessionScope", "mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for", "db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add',", "self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "expired, # instruments_needing_full_assessment and instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment() assert", "= QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login()", "in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur)", "research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments): q", "four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized 
baseline l_qb = QuestionnaireBank( name='localized',", "research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_baseline_instruments): q", "# with only epic26 started, should see results for both # instruments_needing_full_assessment and", "time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now)", "OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate,", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase):", "local_metastatic='localized') # provide treatment date outside of all recurrences tx_date = datetime(2000, 3,", "ROLE from portal.models.user import get_user from portal.system_uri import ICHOM from tests import TEST_USER_ID,", "# Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol)", "Encounter from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import Organization", "iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def", "backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = 
db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments =", "# backdate outside of baseline window (which uses consent date) backdate, nowish =", "metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d',", "intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite", "child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def", "instrument_id = 'eortc' for months_back in (0, 3, 6, 9): backdate, _ =", "set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be looking at iteration 2 (zero", "eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format(", "db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id,", "a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def", "need full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self):", "from datetime import datetime from random import choice from string import ascii_letters from", "instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def 
test_metastatic_on_time(self): #", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3',", "import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None,", "db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) #", "assert a_s.overall_status == OverallStatus.due # in the initial window w/ no questionnaires submitted", "bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb,", "from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration", "backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user", "assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished one,", "import relativedelta from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from", "not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still in", "qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments =", "iteration 0 assert a_s.current_qbd().iteration == 0 
def test_2nd_recur_due(self): # backdate so baseline q's", "Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with", "in the initial window w/ no questionnaires submitted # should include all from", "== OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User", "db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() #", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user)", "\"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey", "assert result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with", "'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id,", "classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q =", "associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline q's have expired", "assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on time 
self.bless_with_basics(local_metastatic='localized', setdate=now)", "status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not", "hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr(", "assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User", "} enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb =", "QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but", "q's have expired, and we within the first # recurrence window backdate, nowish", "is due, should see the thank you message. 
backdate, nowish = associative_backdate( now=now,", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\"", "with all q's expired, # instruments_needing_full_assessment and instruments_in_progress # should be empty assert", "rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks():", "cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}')", "for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from", "def test_all_expired_old_tx(self): self.login() # backdate outside of baseline window (which uses consent date)", "qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document,", "so baseline q's have expired, and we within the first # recurrence window", "of all recurrences tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7',", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q =", "# recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, 
hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic')", "mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)},", "\"At expired, should be expired\" self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1))", ") from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE", "so the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user)", "window backdated = nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument", "self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user =", "= QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user)", "which set of questionnairebanks are generated. As restrictions exist, such as two QBs", "qr's from various qb iterations, confirm # time points. 
nineback, nowish = associative_backdate(", "assert set(found) == expected def test_site_ids(self): # bless org w/ expected identifier type", "none of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now)", "SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in", "db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline", "db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire setup\" eproms_or_tnth", "started, should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] ==", "1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments,", "'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments =", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def", "name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 
0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument", "a_s.overall_status == OverallStatus.due # confirm list of expected intruments needing attention assert (metastatic_baseline_instruments", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id,", "start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments,", "include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now)", "test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s", "'Month 9'} found = [i['timepoint'] for i in bundle['entry']] assert set(found) == expected", "import choice from string import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest import", "def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "and we within the first # recurrence window backdate, nowish = associative_backdate( now=now,", "should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} ==", "should include all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login()", "for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT 
st_qb", "import unicode_literals # isort:skip from datetime import datetime from random import choice from", "# confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def", "class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self):", "6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur", "base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit()", "org = Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2", "qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db):", "include all from second recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics()", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate(", "24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur(", "now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in 
localized_instruments: mock_qr( instrument_id=instrument, status='in-progress',", "with the same classification can't have the same instrument, it doesn't work to", "baseline q's have expired, and we within the first # recurrence window backdate,", "timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'}", "portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE from portal.models.user import get_user from portal.system_uri", "SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions import db from portal.models.audit", "Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\":", "assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress())", "other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now,", "localized_protocol.id metapro_id = metastatic_protocol.id # Define test Orgs and QuestionnaireBanks for each group", "from portal.models.role import ROLE from portal.models.user import 
get_user from portal.system_uri import ICHOM from", "len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() ==", "test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now)", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring',", "mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user)", "ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc", "mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}')", "name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org)", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id,", "def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb)", "iteration=None, 
user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp", "group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ #", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb =", "qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during baseline window a_s_baseline =", "instrument in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic", "due, should see the thank you message. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "setup - set biopsy false for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation(", "'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'}", "have expired, and we within the 2nd # recurrence window backdate, nowish =", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with", "import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions import db from", "== OverallStatus.expired 
def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics(", "50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "initial window w/ no questionnaires submitted # should include all from initial recur", "TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None,", "classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q", "local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb)", "now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "== len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb", "QuestionnaireBank( name='symptom_tracker_recurring', 
classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument", "metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's have expired, and we are", "import NoResultFound from portal.extensions import db from portal.models.audit import Audit from portal.models.clinical_constants import", "= [wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1 =", "id_value = '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system,", "Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\":", "a_s.instruments_in_progress() # metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base", "and we within the 2nd # recurrence window backdate, nowish = associative_backdate( now=now,", "= {'Baseline', 'Month 3', 'Month 6', 'Month 9'} found = [i['timepoint'] for i", "self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status']) ==", "only epic26 started, should see results for both # instruments_needing_full_assessment and instruments_in_progress assert", "len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == 
bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup):", "st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for", "finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user", "== localized_instruments) def test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add',", "for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s =", "if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration =", "isort:skip from datetime import datetime from random import choice from string import ascii_letters", "date # within a valid window should show available assessments. backdate, nowish =", "status lookup date # within a valid window should show available assessments. backdate,", "# backdate so baseline q's have expired, and we are within the #", "no questionnaires submitted # should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) ==", "backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish)", "str(OverallStatus.expired)) def test_none_org(self): # check users w/ none of the above org self.test_user", "qb iterations, confirm # time points. 
nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1))", "# shouldn't need full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all')", "add baseline QNRs, as if submitted nearly 3 months ago, during # baseline", "len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb =", "(localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized", "now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same org association for", "mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock qr's from various", "research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6):", "assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\"", "questionnaire banks :param eproms_or_tnth: controls which set of questionnairebanks are generated. As restrictions", "classification can't have the same instrument, it doesn't work to mix them. \"\"\"", "'tnth' def test_no_start_date(self): # W/O a biopsy (i.e. 
event start date), no questionnaries", "and we are within the # second recurrence window backdate, nowish = associative_backdate(", "def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s =", "staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3',", "30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq =", "backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3:", "to mix them. \"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth ==", "aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role", "questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments", "'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one()", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id,", "qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed", 
"status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now)", "local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due", "# Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}',", "qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in", "doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress", "w/ no questionnaires submitted # should include all from second recur assert set(a_s.instruments_needing_full_assessment())", "questionnaires submitted # should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3", "from portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow()", "test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user,", "check users w/ none of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login()", "instrument, it doesn't work to mix them. 
\"\"\" if eproms_or_tnth == 'eproms': return", "bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff =", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish =", "qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with", "enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit()", "= associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s =", "mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "# User finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user", "indefinite should also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def", "baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = 
db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert", "expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2)", "the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date", "# should include all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm", "== metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code ==", "metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol =", "= Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}',", "timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user =", "= self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle =", "should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress()", "classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in", "results for both # 
instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert", "a_s.overall_status == OverallStatus.completed # shouldn't need full or any inprocess assert not a_s.instruments_needing_full_assessment('all')", "baseline QNRs, as if submitted nearly 3 months ago, during # baseline window", "something on time, and nothing else # is due, should see the thank", "a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired and the status lookup date", "'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create", "on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb =", "same instrument, it doesn't work to mix them. \"\"\" if eproms_or_tnth == 'eproms':", "metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit()", "backdate so the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user =", "3', 'Month 6', 'Month 9'} found = [i['timepoint'] for i in bundle['entry']] assert", "= datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not", "import OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import", "{'Baseline', 'Month 3', 'Month 6', 'Month 9'} found = [i['timepoint'] for i in", "four_q_recur = Recur( 
start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\":", "a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1))", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments:", "in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user =", "a_s.overall_status == OverallStatus.due # in the initial window w/ no questionnaires submitted #", "assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate,", "available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate", "but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline')", "user completed something on time, and nothing else # is due, should see", "use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit()", "= db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id # Define", "portal.models.organization import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline", "3}', recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq =", "mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress", "unicode_literals # isort:skip from datetime import datetime from random import choice from string", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login() backdate, nowish", "self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr(", "0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') 
for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument)", "start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_baseline_instruments): q =", "TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms' #", "QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) #", "l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\":", "'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world questionnaire banks", "Check status during baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status ==", "in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb)", "def test_2nd_recur_due(self): # backdate so baseline q's have expired, and we within the", "# in the initial window w/ no questionnaires submitted # should include all", "iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user =", "assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth'", "outside of all recurrences tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000)", "valid window 
should show available assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(", "within a valid window should show available assessments. backdate, nowish = associative_backdate( now=now,", "OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate,", "window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user =", "= QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id(", "# confirm iteration 0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate so", "id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1)", "'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 =", "associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have", "3, 6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) # add", "nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date", "= QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) 
self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by(", "= db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline',", "portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def", "enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb)", "with only epic26 started, should see results for both # instruments_needing_full_assessment and instruments_in_progress", "as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert", "within the first # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1))", "db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name)", "as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def", "overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq", "QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() 
backdate, nowish = associative_backdate(", "TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None,", "nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc'", "1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\":", "'146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value)", "db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) expected = {'Baseline', 'Month 3', 'Month 6',", "for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status", "\"\"\"Module to test assessment_status\"\"\" from __future__ import unicode_literals # isort:skip from datetime import", "backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1))", "we should be looking at iteration 2 (zero index)! 
assert a_s.current_qbd().iteration == 1", "backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired mock_qr(instrument_id='epic26',", "= backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress #", "localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id #", "and the status lookup date # within a valid window should show available", "# check users w/ none of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0))", "false for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID),", "i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user", "['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11')", "for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq)", "test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result", "name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, 
timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb", "= INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}' )", "a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): #", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At", "QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument", "setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import", "Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur =", "= Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc)", "Questionnaire.find_by_name(name=instrument) qbq = 
QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb)", "and instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def", "assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating", "local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due #", "two QBs with the same classification can't have the same instrument, it doesn't", "expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2)", "classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_baseline_instruments):", "for i in bundle['entry']] assert set(found) == expected def test_site_ids(self): # bless org", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\":", "value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1))", "submitted # should include all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def", "mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): 
TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT", "both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb', timestamp=now) self.test_user =", "index)! assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to be within the", "1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur(", "should see the thank you message. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1))", "timestamp or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format(", "mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s", "SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id) localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc',", "appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self):", "exist, such as two QBs with the same classification can't have the same", "(1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0])", "metastatic_baseline_instruments, 
metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur)", "= db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb =", "'eortc' for months_back in (0, 3, 6, 9): backdate, _ = associative_backdate( now=now,", "time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by(", "questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic')", "timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def", "= associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb = QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for", "localized_instruments) def test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now)", "1 def test_initial_recur_baseline_done(self): # backdate to be within the first recurrence window backdate,", "hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "instrument in 
enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db):", "in bundle['entry']] assert set(found) == expected def test_site_ids(self): # bless org w/ expected", "Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse,", "doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10))", "def test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms`", "(id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog',", "import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import", "== 1 def test_initial_recur_baseline_done(self): # backdate to be within the first recurrence window", "\"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks()", "setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired 
mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26',", "QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) #", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_in_progress_expired(self): self.login()", "0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument)", "# however, we should be looking at iteration 2 (zero index)! assert a_s.current_qbd().iteration", "not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol", "# hasn't taken, but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user)", "OverallStatus.in_progress # with only epic26 started, should see results for both # instruments_needing_full_assessment", "baseline q's have expired, and we within the 2nd # recurrence window backdate,", "get_user from portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now =", "so baseline q's have expired, and we are within the # second recurrence", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur( start='{\"months\": 3}',", "instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s", "locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id # 
Define test Orgs and QuestionnaireBanks for", "mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated',", "qnr_document_id, ) from portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import", "== {'eproms_add'}) assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on time", "recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}',", "3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "# backdate so the baseline q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login()", "recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's have", "QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr) db.session.commit() invalidate_users_QBT(user_id=user_id)", "metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = 
metastatic_protocol.id # Define test Orgs", "assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be looking at iteration 2", "Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol)", "overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq", "from portal.models.audit import Audit from portal.models.clinical_constants import CC from portal.models.encounter import Encounter from", "metastatic_org = Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\":", "mock_qr(instrument_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need full or any", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for", "== OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert", "= QuestionnaireBank( name='metastatic', classification='baseline', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank,", "status during baseline window a_s_baseline = QB_Status( 
user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed", "confirm appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): #", "= 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name ==", "instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def", "window (which uses consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics(", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr(", "( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from", "now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline q's have expired mock_qr(instrument_id='epic23',", "relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only epic26", "both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb", "= '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary',", "= QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not 
a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user", "test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s =", "be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if", "staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id],", "raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base", "staff user w/ same org association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter(", "as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\" self.login()", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup):", "setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0, 3, 6, 9): backdate,", "portal.extensions import db from portal.models.audit import Audit from portal.models.clinical_constants import CC from portal.models.encounter", "assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized', setdate=now) self.test_user = db.session.merge(self.test_user) #", 
"QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due # confirm list of expected intruments needing", "self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org = Organization.query.filter( Organization.name == 'metastatic').one() id1", "finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now)", "setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue", "QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no", "consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') #", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb =", "enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3", "= 'tnth' def test_no_start_date(self): # W/O a biopsy (i.e. 
event start date), no", "now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class", "result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound):", "= Recur( start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}',", "from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so baseline", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr(", "rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with", "000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress',", "db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur)", "pytest from sqlalchemy.orm.exc 
import NoResultFound from portal.extensions import db from portal.models.audit import Audit", "controls which set of questionnairebanks are generated. As restrictions exist, such as two", "= associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s =", "setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due #", "self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s =", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb = QuestionnaireBank( name='metastatic', classification='baseline',", "as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished both on time", "mock_qr(instrument_id=instrument_id, timestamp=backdate) # add staff user w/ same org association for bundle creation", "NoResultFound from portal.extensions import db from portal.models.audit import Audit from portal.models.clinical_constants import CC", "both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self):", "`{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol =", "recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = 
QuestionnaireBankQuestionnaire(questionnaire=q,", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth", "class TestTnthQB_Status(TestQuestionnaireSetup): \"\"\"Tests with Tnth QuestionnaireBanks\"\"\" eproms_or_tnth = 'tnth' def test_no_start_date(self): # W/O", "Recurrence st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb)", "in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring", "so baseline q's have expired, and we within the 2nd # recurrence window", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user =", "in child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup):", "= QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for", "start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q", "portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( 
QuestionnaireBank, QuestionnaireBankQuestionnaire,", "local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s", "for the initial recurrence show due. a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth):", "def test_localized_inprogress_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress',", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate", "test_initial_recur_due(self): # backdate so baseline q's have expired, and we within the first", "same org association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one())", "db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments)", "metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline", "restrictions exist, such as two QBs with the same classification can't have the", "# modify in child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth)", "localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with 
SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol =", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4',", "set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired and", "db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management", "Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\": 0}', expired='{\"years\": 50}') for", "localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}',", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish", "expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic", "instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\":", "user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb: qstats", "baseline window 
backdated = nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for", "test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user", "date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide", "= ''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp or datetime.utcnow() qr_document =", "metastatic_org = map( db.session.merge, (localized_org, metastatic_org)) three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur", "real world questionnaire banks :param eproms_or_tnth: controls which set of questionnairebanks are generated.", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb =", "a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate, nowish", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in", "# Metastatic recurring 3 mr3_qb = QuestionnaireBank( name='metastatic_recurring3', classification='recurring', research_protocol_id=metapro_id, start='{\"days\": 0}', overdue='{\"days\":", "def test_no_start_date(self): # W/O a biopsy (i.e. 
event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value)", "['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired and the status", "date outside of all recurrences tx_date = datetime(2000, 3, 12, 0, 0, 00,", "add staff user w/ same org association for bundle creation staff = self.add_user(username='staff')", "on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26',", "== 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result", "of near real world questionnaire banks :param eproms_or_tnth: controls which set of questionnairebanks", "test_boundary_expired(self): \"At expired, should be expired\" self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank", "i in bundle['entry']] assert set(found) == expected def test_site_ids(self): # bless org w/", "questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user", "assert not a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on time self.bless_with_basics( local_metastatic='metastatic',", "for test classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify in", "from __future__ import unicode_literals # isort:skip from datetime import datetime from random import", "rank, instrument in 
enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) #", "rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) #", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the", "== metastatic_3) # confirm iteration 0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): #", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert not", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(l_qb) db.session.add(mb_qb) db.session.add(mi_qb)", "backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s", "instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id,", "assert a_s.overall_status == OverallStatus.completed # shouldn't need full or any inprocess assert not", "QB_Status( 
user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\"", "Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff)", "baseline window (which uses consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1))", "OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not", "for rank, instrument in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq)", "self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert", "local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired def", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished", "for _ in range(10)) timestamp = timestamp or datetime.utcnow() qr_document = { \"questionnaire\":", "test_boundary_in_progress_expired(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for", "1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check users w/ none", "enumerate(localized_instruments): 
q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq) # Metastatic baseline mb_qb", "months_back in (0, 3, 6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id,", "localized_instruments def test_localized_in_process(self): # User finished one, time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now)", "and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self):", "import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, )", "iteration=None, status='in-progress') assert result == 'two11' def test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb =", "setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress',", "\"official\", \"label\": \"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc =", "self.test_user = db.session.merge(self.test_user) # confirm appropriate instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment())", "initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be looking at", "timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def 
test_boundary_recurring_in_progress(self): self.login()", "are within the # second recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6,", "portal.models.recur import Recur from portal.models.research_protocol import ResearchProtocol from portal.models.role import ROLE from portal.models.user", "timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for _", "role_name=ROLE.STAFF.value) staff = db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert", "valid window should show available assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate,", "'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add',", "As restrictions exist, such as two QBs with the same classification can't have", "db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id # Define test Orgs and QuestionnaireBanks", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.in_progress def test_boundary_recurring_in_progress(self): self.login() backdate, nowish", "six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name", "all from initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be", "metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user = db.session.merge(self.test_user) # Check status during baseline window", "= {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add',", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) 
mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite', classification='indefinite', research_protocol_id=metapro_id, start='{\"days\":", "enc = db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd()", "return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks():", "recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4): q", "a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only epic26 started,", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate", "three_q_recur = db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb", "ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols", "timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "= db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need", "db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker", "a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still in 
OverallStatus.due", "confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on", "w/ same org association for bundle creation staff = self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name ==", "finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26',", "db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\":", "st_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur)", "{ \"questionnaire\": { \"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\":", "(which uses consent date) backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate,", "timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s =", "ResearchProtocols localized_protocol = ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol", "shouldn't need full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def", "= Organization(name='metastatic') 
metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}',", "setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same org", "see results for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23'] == a_s.instruments_in_progress() assert", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's", "system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db):", "'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/ same org association for bundle creation", "not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp or", "therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "# User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26', timestamp=now) mock_qr(instrument_id='comorb',", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic recurring 3 mr3_qb = QuestionnaireBank(", "db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def test_localized_using_org(self): self.bless_with_basics(local_metastatic='localized',", 
"enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4", "SessionScope(db): db.session.add(enc) db.session.commit() enc = db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp)", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr3_qb.questionnaires.append(qbq) # Metastatic recurring 4 mr4_qb", "== len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir()", "self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def", "in range(10)) timestamp = timestamp or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\":", "flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound from portal.extensions import db", "test_secondary_recur_due(self): # backdate so baseline q's have expired, and we are within the", "name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument", "0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument)", "3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = 
QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should", "type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11'", "= Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 =", "SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge, (localized_org,", "as_of_date=now) assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now,", "all q's expired, # instruments_needing_full_assessment and instruments_in_progress # should be empty assert not", "setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user)", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted", "QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment()", "6', 'Month 9'} found = [i['timepoint'] for i in bundle['entry']] assert set(found) ==", "from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, ) from portal.models.questionnaire_response import ( 
QuestionnaireResponse, aggregate_responses,", "at iteration 2 (zero index)! assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate", "assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self):", "mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress',", "2nd # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=backdate,", "hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' mock_qr(instrument_id=instrument_id) # add staff user w/", "from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from", "rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr6_qb.questionnaires.append(qbq) with", "q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "iteration 2 (zero index)! 
assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to", "baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date =", "from portal.models.user import get_user from portal.system_uri import ICHOM from tests import TEST_USER_ID, TestCase,", "with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org, metastatic_org = map( db.session.merge,", "# backdate so baseline q's have expired, and we within the first #", "__future__ import unicode_literals # isort:skip from datetime import datetime from random import choice", "from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from", "assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial recurrence show due.", "rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing", "test_boundary_recurring_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb", "iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb,", "db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert 1 == len(bundle['entry'])", "= localized_protocol.id metapro_id = metastatic_protocol.id # Define 
test Orgs and QuestionnaireBanks for each", "= QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm appropriate instruments assert (localized_instruments", "QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all')", "expired, and we within the first # recurrence window backdate, nowish = associative_backdate(", "30}', expired='{\"months\": 3}', recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq", "org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics(", "for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org)", "backdate to be within the first recurrence window backdate, nowish = associative_backdate( now=now,", "qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration) with SessionScope(db): db.session.add(qr)", "with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring',", "12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user)", "assert a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted # should include all", "for bundle creation staff = 
self.add_user(username='staff') staff.organizations.append(Organization.query.filter( Organization.name == 'metastatic').one()) self.promote_user(staff, role_name=ROLE.STAFF.value) staff", "\"cPRO survey session ID\", \"value\": doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned',", "TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank( name='symptom_tracker', classification='baseline',", "'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of", "'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world questionnaire banks :param", "QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse(", "response.status_code == 200 assert len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def", "self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user,", "window should show available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic')", "a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add',", "# isort:skip from datetime import datetime from random import choice from string import", "{ 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 =", "start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4): q =", "start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument in enumerate(symptom_tracker_instruments): q =", "we within the 2nd # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=9,", "qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result", "status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s =", "(1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) assert (id1.as_fhir() == bundle['entry'][0]['subject']['careProvider'][0] ['identifier'][0]) class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self):", "QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert", "a_s.overall_status == OverallStatus.partially_completed # with all q's expired, # instruments_needing_full_assessment and 
instruments_in_progress #", "work to mix them. \"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth", "lookup date # within a valid window should show available assessments. backdate, nowish", "''.join(choice(ascii_letters) for _ in range(10)) timestamp = timestamp or datetime.utcnow() qr_document = {", "for rank, instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) l_qb.questionnaires.append(qbq)", "from portal.models.organization import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status import QB_Status from", "no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set biopsy false for test", "assessment_status\"\"\" from __future__ import unicode_literals # isort:skip from datetime import datetime from random", "== OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial recurrence", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq) # Metastatic recurring 6 mr6_qb = QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id,", "random import choice from string import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest", "test_2nd_recur_due(self): # backdate so baseline q's have expired, and we within the 2nd", "backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user)", "mock_tnth_questionnairebanks() 
else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): #", "= QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline", "from random import choice from string import ascii_letters from dateutil.relativedelta import relativedelta from", "timestamp = timestamp or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional questions\",", "db.session.merge(self.test_user) # Check status during baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert", "# second recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate,", "tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM,", "= associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s =", "a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need full or", "thank you message. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized')", "instrument in localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish-relativedelta(days=1)) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "= db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id # Define test Orgs and", "def test_aggregate_response_timepoints(self): # generate a few mock qr's from various qb iterations, confirm", "should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user = db.session.merge(self.test_user) a_s = QB_Status(user=user,", "start='{\"days\": 0}', expired='{\"years\": 50}') for rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq", "set biopsy false for test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE,", "mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, doc_id='two11') qb = db.session.merge(qb) result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id,", "db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id =", "result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') assert result == 'two11' def", "Orgs and QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic')", "q's have expired, and we 
within the 2nd # recurrence window backdate, nowish", "baseline window a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not", "same classification can't have the same instrument, it doesn't work to mix them.", "= QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank,", "db.session.merge(three_q_recur) four_q_recur = db.session.merge(four_q_recur) six_q_recur = db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank(", "in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb = QuestionnaireBank(", "all recurrences tx_date = datetime(2000, 3, 12, 0, 0, 00, 000000) self.add_procedure(code='7', display='Focal", "'epic23', 'prems'} symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series", "assert a_s.overall_status == OverallStatus.due def test_boundary_overdue(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3,", "org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline", "test_metastatic_on_time(self): # User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in", "name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog', qb=mi_qb, timestamp=now) self.test_user = 
db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "= QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.overdue def test_boundary_expired(self): \"At expired, should be", "Organization(name='metastatic') metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\":", "= db.session.merge(staff) bundle = aggregate_responses( instrument_ids=[instrument_id], current_user=staff) id1 = db.session.merge(id1) assert 1 ==", "id1 = db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1", "== expected def test_site_ids(self): # bless org w/ expected identifier type wanted_system =", "['doc-23'] == a_s.instruments_in_progress() assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's have", "== localized_instruments def test_localized_in_process(self): # User finished one, time remains for other self.bless_with_basics(local_metastatic='localized',", "assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken, but still", "QBs with the same classification can't have the same instrument, it doesn't work", "confirm iteration 0 assert a_s.current_qbd().iteration == 0 def test_2nd_recur_due(self): # backdate so baseline", "all from initial recur assert (set(a_s.instruments_needing_full_assessment()) == metastatic_3) # confirm iteration 0 assert", "== OverallStatus.completed # shouldn't need full or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert", "`tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() 
mock_questionnairebanks(self.eproms_or_tnth) class TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a", "0, 0, 00, 000000) self.add_procedure(code='7', display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s", "test_aggregate_response_timepoints(self): # generate a few mock qr's from various qb iterations, confirm #", "= associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so the baseline q's have", "recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_4 def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2')", "expired, should be expired\" self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics(", "assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized',", "set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should also be 'due' assert (metastatic_indefinite_instruments", "Whereas \"current\" status for the initial recurrence show due. 
a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "import Encounter from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import", "symptom_tracker_instruments = {'epic26', 'eq5d', 'maxpc', 'pam'} def mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near", "self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status", "self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status ==", "db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\":", "db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT st_recur_qb = QuestionnaireBank( name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\":", "the status lookup date # within a valid window should show available assessments.", "classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments):", "users w/ none of the above org self.test_user = db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics(", "it doesn't work to mix them. 
\"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks()", "'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond", "OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() # Whereas \"current\" status for the initial recurrence show", "(i.e. event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default setup - set", "from portal.models.clinical_constants import CC from portal.models.encounter import Encounter from portal.models.identifier import Identifier from", "3}') for rank, instrument in enumerate(metastatic_6): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to be within the first", "instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb', status='in-progress', doc_id='comorb',", "the same instrument, it doesn't work to mix them. 
\"\"\" if eproms_or_tnth ==", "assert ['doc-26'] == a_s.instruments_in_progress() def test_metastatic_as_of_date(self): # backdating consent beyond expired and the", "\"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user,", "start='{\"months\": 3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management =", "local_metastatic='localized') # backdate so the baseline q's have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate)", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no questionnaires", "{'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc',", "use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9,", "recurring 4 mr4_qb = QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}',", "mock_questionnairebanks(eproms_or_tnth): \"\"\"Create a series of near real world questionnaire banks :param eproms_or_tnth: controls", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur(", "also be 'due' assert (metastatic_indefinite_instruments == set(a_s.instruments_needing_full_assessment('indefinite'))) assert not a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): #", "expired='{\"months\": 3}', recurs=[st_recur] ) 
for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq", "(localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur)", "eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else: raise", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all q's expired,", "OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of baseline window (which uses consent", "datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id:", "first recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, days=2)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic')", "name='symptom_tracker_recurring', classification='recurring', intervention_id=self_management.id, start='{\"days\": 0}', expired='{\"months\": 3}', recurs=[st_recur] ) for rank, instrument in", "# confirm appropriate instruments assert (localized_instruments - set(a_s.instruments_needing_full_assessment('all')) == {'eproms_add'}) assert not a_s.instruments_in_progress()", "'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'}", "wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] = [wanted_system] id_value = '146-11' org", "in localized_instruments: mock_qr( instrument_id=instrument, 
status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "epic26 started, should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add',", "if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return mock_tnth_questionnairebanks() else:", "# 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238 three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur =", "setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.expired", "instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) # Metastatic", "(metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should also be 'due'", "= ResearchProtocol(name='localized_protocol') metastatic_protocol = ResearchProtocol(name='metastatic_protocol') with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol)", "initial recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 def test_secondary_recur_due(self): # backdate so baseline q's", "db.session.add(mi_qb) db.session.add(mr3_qb) db.session.add(mr4_qb) db.session.add(mr6_qb) db.session.commit() def mock_tnth_questionnairebanks(): for name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) #", "backdate outside of baseline window (which uses consent date) backdate, nowish = 
associative_backdate(", "localized_instruments: mock_qr( instrument_id=instrument, status='in-progress', timestamp=nowish) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "# time points. nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic')", "now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # backdate so the baseline q's have expired", "# if the user completed something on time, and nothing else # is", "expired='{\"months\": 3}' ) for rank, instrument in enumerate(symptom_tracker_instruments): q = Questionnaire.find_by_name(name=instrument) qbq =", "user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters) for _ in range(10)) timestamp =", "def test_metastatic_due(self): # hasn't taken, but still in OverallStatus.due period self.bless_with_basics(local_metastatic='metastatic', setdate=now) self.test_user", "system=unwanted_system, use='secondary', value=id_value) org.identifiers.append(id1) org.identifiers.append(id2) with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now,", "nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s", "QuestionnaireBank( name='metastatic_recurring4', classification='recurring', research_protocol_id=metapro_id, recurs=[four_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank,", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # confirm appropriate instruments", "name in (symptom_tracker_instruments): TestCase.add_questionnaire(name=name) # Symptom Tracker 
Baseline self_management = INTERVENTION.SELF_MANAGEMENT st_qb =", "Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank( name='metastatic_indefinite',", "associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside of", "\"display\": \"Additional questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO", "backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0, 3,", "Organization.query.filter( Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier(", "instruments a_s = QB_Status(user=self.test_user, as_of_date=now) assert (set(a_s.instruments_needing_full_assessment()) == localized_instruments) def test_localized_on_time(self): # User", "expired='{\"months\": 3}') for rank, instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q,", "setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr( instrument_id='epic26', status='in-progress', doc_id='epic26', timestamp=now) mock_qr( instrument_id='comorb',", "attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not a_s.instruments_in_progress() # metastatic indefinite should also", "localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol) metastatic_org = Organization(name='metastatic') 
metastatic_org.research_protocols.append(metastatic_protocol) # from https://docs.google.com/spreadsheets/d/\\ # 1oJ8HKfMHOdXkSshjRlr8lFXxT4aUHX5ntxnKMgf50wE/edit#gid=1339608238", "q's have expired mock_qr(instrument_id='epic23', status='in-progress', doc_id='doc-23', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate", "Organization.name == 'metastatic').one() id1 = Identifier( system=wanted_system, use='secondary', value=id_value) id2 = Identifier( system=unwanted_system,", "# Whereas \"current\" status for the initial recurrence show due. a_s = QB_Status(user=self.test_user,", "q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur", "= qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', iteration=None, status='in-progress') def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline", "portal.models.intervention import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import OverallStatus from portal.models.qb_status", "qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_qb.questionnaires.append(qbq) # Symptom Tracker Recurrence st_recur = Recur( start='{\"months\":", "QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank,", "test_metastatic_as_of_date(self): # backdating consent beyond expired and the status lookup date # within", "or `tntn`, not `{}`'.format( eproms_or_tnth)) def mock_eproms_questionnairebanks(): # Define base ResearchProtocols localized_protocol =", "such as two QBs with the same classification can't have the same instrument,", "remains for other 
self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "QNRs, as if submitted nearly 3 months ago, during # baseline window backdated", "\"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc)", "from string import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest import SessionScope import", "= associative_backdate( now=now, backdate=relativedelta(months=4, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') # provide treatment date outside", "# backdate so the baseline q's have expired mock_qr(instrument_id='epic26', status='in-progress', doc_id='doc-26', timestamp=backdate) self.test_user", "3 months ago, during # baseline window backdated = nowish - relativedelta(months=2, days=25)", "points. 
nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id =", "time remains for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s =", "appropriate instruments assert not a_s.instruments_needing_full_assessment() assert set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User", "= QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog',", "assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user", "local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first() mock_qr(instrument_id='irondemog',", "3}') for rank, instrument in enumerate(localized_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "= QuestionnaireBank( name='metastatic_recurring6', classification='recurring', research_protocol_id=metapro_id, recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for", "(zero index)! 
assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self): # backdate to be within", "instruments_in_progress # should be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self):", "should be empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating", "def test_localized_on_time(self): # User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) mock_qr(instrument_id='epic26',", "response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def test_none_org(self): # check users w/ none of the above", "associative_backdate now = datetime.utcnow() def mock_qr( instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID):", "= timestamp or datetime.utcnow() qr_document = { \"questionnaire\": { \"display\": \"Additional questions\", \"reference\":", "start='{\"months\": 6}', cycle_length='{\"years\": 1}', termination='{\"months\": 33}') six_q_recur = Recur( start='{\"years\": 1}', cycle_length='{\"years\": 1}',", "OverallStatus.due # w/ no questionnaires submitted # should include all from second recur", "= db.session.merge(self.test_user) self.test_user.organizations.append(Organization.query.get(0)) self.login() self.bless_with_basics( local_metastatic='metastatic', setdate=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "= QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr =", "a_s_baseline = QB_Status( user=self.test_user, as_of_date=backdated) assert a_s_baseline.overall_status == OverallStatus.completed assert not a_s_baseline.instruments_needing_full_assessment() #", 
"QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need full or any inprocess", "test_qnr_id_missing(self): qb = QuestionnaireBank.query.first() qb = db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID,", "ResearchProtocol from portal.models.role import ROLE from portal.models.user import get_user from portal.system_uri import ICHOM", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need full", "metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc',", "be looking at iteration 2 (zero index)! assert a_s.current_qbd().iteration == 1 def test_initial_recur_baseline_done(self):", "SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic') instrument_id", "'tnth': return mock_tnth_questionnairebanks() else: raise ValueError('expecting `eproms` or `tntn`, not `{}`'.format( eproms_or_tnth)) def", "l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\": 3}') for", "W/O a biopsy (i.e. 
event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle default", "self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument", "def test_enrolled_in_metastatic(self): \"\"\"metastatic should include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s", "recurs=[six_q_recur], start='{\"days\": 0}', overdue='{\"days\": 30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_6): q", "mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify in child class to test", "empty assert not a_s.instruments_needing_full_assessment() assert not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond", "test classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify in child", "TestAggregateResponses(TestQuestionnaireSetup): def test_aggregate_response_timepoints(self): # generate a few mock qr's from various qb iterations,", "self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress # confirm", "QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes", "import ICHOM from tests import TEST_USER_ID, TestCase, associative_backdate now = datetime.utcnow() def mock_qr(", "dateutil.relativedelta import relativedelta from flask_webtest import SessionScope import pytest from sqlalchemy.orm.exc import NoResultFound", "db.session.merge(qb) with pytest.raises(NoResultFound): result = qnr_document_id( subject_id=TEST_USER_ID, questionnaire_bank_id=qb.id, questionnaire_name='irondemog', 
iteration=None, status='in-progress') def test_enrolled_in_metastatic(self):", "for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mr4_qb.questionnaires.append(qbq)", "modify in child class to test `tnth` def setUp(self): super(TestQuestionnaireSetup, self).setUp() mock_questionnairebanks(self.eproms_or_tnth) class", "assert a_s.instruments_needing_full_assessment() def test_initial_recur_due(self): # backdate so baseline q's have expired, and we", "'factfpsi'} metastatic_6 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} symptom_tracker_instruments = {'epic26',", "= QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) st_recur_qb.questionnaires.append(qbq) with SessionScope(db): db.session.add(st_recur_qb) db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test", "from portal.models.encounter import Encounter from portal.models.identifier import Identifier from portal.models.intervention import INTERVENTION from", "localized_instruments = {'eproms_add', 'epic26', 'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23',", "associative_backdate( now=now, backdate=relativedelta(months=6, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user,", "test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert len(response.json['status'])", "\"Base for test classes needing mock questionnaire setup\" eproms_or_tnth = 'eproms' # modify", "not a_s.instruments_in_progress() def test_localized_as_of_date(self): # backdating consent beyond expired and the status lookup", "timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = 
QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.in_progress #", "three_q_recur = Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\":", "a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized should include baseline but not indefinite\"\"\" self.bless_with_basics(local_metastatic='localized') user =", "show available assessments. backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') #", "have expired, and we within the first # recurrence window backdate, nowish =", "self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user =", "(0, 3, 6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate) #", "Recur( start='{\"months\": 3}', cycle_length='{\"months\": 6}', termination='{\"months\": 24}') four_q_recur = Recur( start='{\"months\": 6}', cycle_length='{\"years\":", "'comorb'} metastatic_baseline_instruments = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'}", "# toggle default setup - set biopsy false for test user self.login() self.test_user", "as_of_date=now) assert a_s.overall_status == OverallStatus.completed # shouldn't need full or any inprocess assert", "# confirm appropriate instruments assert not a_s.instruments_needing_full_assessment('all') def test_localized_inprogress_on_time(self): # User finished both", "self.bless_with_basics( local_metastatic='metastatic', setdate=now) for i in metastatic_baseline_instruments: mock_qr(instrument_id=i, timestamp=now) mi_qb = 
QuestionnaireBank.query.filter_by( name='metastatic_indefinite').first()", "them. \"\"\" if eproms_or_tnth == 'eproms': return mock_eproms_questionnairebanks() elif eproms_or_tnth == 'tnth': return", "from portal.extensions import db from portal.models.audit import Audit from portal.models.clinical_constants import CC from", "a_s.instruments_in_progress('indefinite') def test_localized_overdue(self): # if the user completed something on time, and nothing", "= { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 =", "= qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status, authored=timestamp, document=qr_document, encounter_id=enc.id, questionnaire_bank=qb, qb_iteration=iteration)", "have expired mock_qr( instrument_id='epic26', status='in-progress', timestamp=backdate) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "= QuestionnaireBank.query.filter_by( name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0)", "= db.session.merge(id1) assert 1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 ==", "confirm list of expected intruments needing attention assert (metastatic_baseline_instruments == set(a_s.instruments_needing_full_assessment())) assert not", "30}', expired='{\"months\": 3}') for rank, instrument in enumerate(metastatic_4): q = Questionnaire.find_by_name(name=instrument) qbq =", "or any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't", "1 == len(bundle['entry']) assert (1 == len(bundle['entry'][0]['subject']['careProvider'])) assert (1 == len(bundle['entry'][0]['subject']['careProvider'][0] ['identifier'])) 
assert", "self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert not", "db.session.commit() class TestQuestionnaireSetup(TestCase): \"Base for test classes needing mock questionnaire setup\" eproms_or_tnth =", "rank, instrument in enumerate(metastatic_indefinite_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mi_qb.questionnaires.append(qbq) #", "metastatic_3 # however, we should be looking at iteration 2 (zero index)! assert", "User finished both on time self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr( instrument_id='eproms_add', status='in-progress', doc_id='eproms_add', timestamp=now) mock_qr(", "9'} found = [i['timepoint'] for i in bundle['entry']] assert set(found) == expected def", "are generated. As restrictions exist, such as two QBs with the same classification", "see the thank you message. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics(", "import CC from portal.models.encounter import Encounter from portal.models.identifier import Identifier from portal.models.intervention import", "with SessionScope(db): db.session.commit() nineback, nowish = associative_backdate( now=now, backdate=relativedelta(months=9, hours=1)) self.bless_with_basics( setdate=nineback, local_metastatic='metastatic')", "'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6 = { 'eortc',", "user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert not a_s.enrolled_in_classification('indefinite') def", "timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now) assert a_s.overall_status == OverallStatus.completed #", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed def test_all_expired_old_tx(self): self.login() # backdate outside of", "= { 'eortc', 'eproms_add', 'ironmisc'} metastatic_4 = { 'eortc', 'eproms_add', 'ironmisc', 'factfpsi'} metastatic_6", "few mock qr's from various qb iterations, confirm # time points. nineback, nowish", "available assessments. 
backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3)) self.bless_with_basics(setdate=backdate, local_metastatic='metastatic') # backdate so", "import invalidate_users_QBT from portal.models.questionnaire import Questionnaire from portal.models.questionnaire_bank import ( QuestionnaireBank, QuestionnaireBankQuestionnaire, )", "associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') for instrument in localized_instruments: mock_qr( instrument_id=instrument,", "instrument_id, status='completed', timestamp=None, qb=None, doc_id=None, iteration=None, user_id=TEST_USER_ID): if not doc_id: doc_id = ''.join(choice(ascii_letters)", "db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id = localized_protocol.id metapro_id = metastatic_protocol.id # Define test", "enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db): db.session.add(enc) db.session.commit() enc =", "audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now) qstats = QB_Status(self.test_user, now) assert not qstats.current_qbd() assert not", "self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status ==", "set(a_s.instruments_in_progress()) == localized_instruments def test_localized_in_process(self): # User finished one, time remains for other", "epic26 started, should see results for both # instruments_needing_full_assessment and instruments_in_progress assert ['doc-23']", "eproms_or_tnth: controls which set of questionnairebanks are generated. 
As restrictions exist, such as", "# baseline window backdated = nowish - relativedelta(months=2, days=25) baseline = QuestionnaireBank.query.filter_by( name='metastatic').one()", "db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.partially_completed # with all q's", "mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish)", "timestamp=backdate) # add staff user w/ same org association for bundle creation staff", "with SessionScope(db): db.session.add(localized_protocol) db.session.add(metastatic_protocol) db.session.commit() localized_protocol = db.session.merge(localized_protocol) metastatic_protocol = db.session.merge(metastatic_protocol) locpro_id =", "recurs=[three_q_recur]) for rank, instrument in enumerate(metastatic_3): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "recur assert set(a_s.instruments_needing_full_assessment()) == metastatic_3 # however, we should be looking at iteration", "user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self):", "'eortc', 'eproms_add', 'ironmisc', 'factfpsi', 'epic23', 'prems'} metastatic_indefinite_instruments = {'irondemog'} metastatic_3 = { 'eortc',", "qbd = qstats.current_qbd() qb, iteration = qbd.questionnaire_bank, qbd.iteration qr = QuestionnaireResponse( subject_id=user_id, status=status,", "Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}', overdue='{\"days\": 7}', expired='{\"months\":", "def test_localized_as_of_date(self): # backdating consent beyond expired and 
the status lookup date #", "# Define test Orgs and QuestionnaireBanks for each group localized_org = Organization(name='localized') localized_org.research_protocols.append(localized_protocol)", "import Identifier from portal.models.intervention import INTERVENTION from portal.models.organization import Organization from portal.models.overall_status import", "include baseline and indefinite\"\"\" self.bless_with_basics(local_metastatic='metastatic') user = db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert", "a_s.instruments_in_progress() def test_metastatic_on_time(self): # User finished both on time self.bless_with_basics( local_metastatic='metastatic', setdate=now) for", "from portal.models.questionnaire_response import ( QuestionnaireResponse, aggregate_responses, qnr_document_id, ) from portal.models.recur import Recur from", "local_metastatic='metastatic') instrument_id = 'eortc' for months_back in (0, 3, 6, 9): backdate, _", "display='Focal therapy', system=ICHOM, setdate=tx_date) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "# instruments_needing_full_assessment and instruments_in_progress assert ({'eproms_add', 'comorb'} == set(a_s.instruments_needing_full_assessment())) assert ['doc-26'] == a_s.instruments_in_progress()", "QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # w/ no questionnaires submitted # should", "doc_id, \"system\": \"https://stg-ae.us.truenth.org/eproms-demo\"} } enc = Encounter( status='planned', auth_method='url_authenticated', user_id=user_id, start_time=timestamp) with SessionScope(db):", "status for the initial recurrence show due. 
a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status", "= Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank) mb_qb.questionnaires.append(qbq) # Metastatic indefinite mi_qb = QuestionnaireBank(", "org w/ expected identifier type wanted_system = 'http://pcctc.org/' unwanted_system = 'http://other.org/' self.app.config['REPORTING_IDENTIFIER_SYSTEMS'] =", "name='metastatic_recurring3').first() for instrument in metastatic_3: mock_qr( instrument_id=instrument, status='in-progress', qb=mr3_qb, timestamp=nowish, iteration=0) self.test_user =", "# W/O a biopsy (i.e. event start date), no questionnaries self.promote_user(role_name=ROLE.PATIENT.value) # toggle", "= QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with only epic26 started, should", "months ago, during # baseline window backdated = nowish - relativedelta(months=2, days=25) baseline", "OverallStatus from portal.models.qb_status import QB_Status from portal.models.qb_timeline import invalidate_users_QBT from portal.models.questionnaire import Questionnaire", "= db.session.merge(six_q_recur) # Localized baseline l_qb = QuestionnaireBank( name='localized', classification='baseline', research_protocol_id=locpro_id, start='{\"days\": 0}',", "def test_batch_lookup(self): self.login() self.bless_with_basics() response = self.client.get( '/api/consent-assessment-status?user_id=1&user_id=2') assert response.status_code == 200 assert", "questions\", \"reference\": \"https://{}/api/questionnaires/{}\".format( 'SERVER_NAME', instrument_id)}, \"identifier\": { \"use\": \"official\", \"label\": \"cPRO survey session", "any inprocess assert not a_s.instruments_needing_full_assessment('all') assert not a_s.instruments_in_progress('all') def test_metastatic_due(self): # hasn't taken,", "string import ascii_letters from dateutil.relativedelta import relativedelta from flask_webtest 
import SessionScope import pytest", "backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user, as_of_date=as_of_date) assert a_s.overall_status == OverallStatus.in_progress # with", "= db.session.merge(self.test_user) a_s = QB_Status(user=user, as_of_date=now) assert a_s.enrolled_in_classification('baseline') assert a_s.enrolled_in_classification('indefinite') def test_enrolled_in_localized(self): \"\"\"localized", "3}', cycle_length='{\"months\": 3}', termination='{\"months\": 27}') with SessionScope(db): db.session.add(st_qb) db.session.add(st_recur) db.session.commit() self_management = INTERVENTION.SELF_MANAGEMENT", "a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert a_s.overall_status == OverallStatus.due # in the initial window", "test user self.login() self.test_user = db.session.merge(self.test_user) self.test_user.save_observation( codeable_concept=CC.BIOPSY, value_quantity=CC.FALSE_VALUE, audit=Audit(user_id=TEST_USER_ID, subject_id=TEST_USER_ID), status='final', issued=now)", "baseline = QuestionnaireBank.query.filter_by( name='metastatic').one() for instrument in metastatic_baseline_instruments: mock_qr(instrument, qb=baseline, timestamp=backdated) self.test_user =", "a_s.overall_status == OverallStatus.expired def test_boundary_in_progress(self): self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=-1))", "the first # recurrence window backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=3, hours=1)) self.bless_with_basics(", "3}') for rank, instrument in enumerate(metastatic_baseline_instruments): q = Questionnaire.find_by_name(name=instrument) qbq = QuestionnaireBankQuestionnaire(questionnaire=q, rank=rank)", "= [i['timepoint'] for i in bundle['entry']] assert set(found) == expected def test_site_ids(self): #", "len(response.json['status']) == 1 assert ( response.json['status'][0]['consents'][0]['assessment_status'] == str(OverallStatus.expired)) def 
test_none_org(self): # check users", "doc_id='doc-26', timestamp=backdate) self.test_user = db.session.merge(self.test_user) as_of_date = backdate + relativedelta(days=2) a_s = QB_Status(user=self.test_user,", "for other self.bless_with_basics(local_metastatic='localized', setdate=now) mock_qr(instrument_id='eproms_add', timestamp=now) self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=now)", "db.session.merge(enc) if not qb: qstats = QB_Status(get_user(user_id), timestamp) qbd = qstats.current_qbd() qb, iteration", "backdate=relativedelta(months=3, hours=1)) self.bless_with_basics( setdate=backdate, local_metastatic='localized') self.test_user = db.session.merge(self.test_user) a_s = QB_Status(user=self.test_user, as_of_date=nowish) assert", "# add staff user w/ same org association for bundle creation staff =", "cycle_length='{\"years\": 1}', termination='{\"years\": 3, \"months\": 3}') for name in (localized_instruments.union(*( metastatic_baseline_instruments, metastatic_indefinite_instruments, metastatic_3,", "metastatic_3, metastatic_4, metastatic_6))): TestCase.add_questionnaire(name=name) with SessionScope(db): db.session.add(localized_org) db.session.add(metastatic_org) db.session.add(three_q_recur) db.session.add(four_q_recur) db.session.add(six_q_recur) db.session.commit() localized_org,", "self.login() backdate, nowish = associative_backdate( now=now, backdate=relativedelta(months=6, hours=-1)) self.bless_with_basics( setdate=backdate, local_metastatic='metastatic') mr3_qb =", "= metastatic_protocol.id # Define test Orgs and QuestionnaireBanks for each group localized_org =", "in (0, 3, 6, 9): backdate, _ = associative_backdate( now=now, backdate=relativedelta(months=months_back)) mock_qr(instrument_id=instrument_id, timestamp=backdate)", "class TestQB_Status(TestQuestionnaireSetup): def test_qnr_id(self): qb = QuestionnaireBank.query.first() mock_qr( instrument_id='irondemog', status='in-progress', qb=qb, 
doc_id='two11') qb" ]
[ "dir)): for file in files: callback(self, dir, file) ''' Get iamge data '''", "return full_width / count, full_height / count ''' Get width and height of", "dataset ''' def get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels = np.array(list(map(lambda e: e[2],", "''' def get_data_mean_size(self): full_width, full_height = 0, 0 count = 0 def dummy(self,", "as plt ''' This class stands for dataset and provides data processing oparations", "provides data processing oparations ''' class DataSet(object): def __init__(self, data_dir, batch_size): self.data_dir =", "get_data_mean_size(self): full_width, full_height = 0, 0 count = 0 def dummy(self, dir, file):", "get_validation_set(self): data = np.array(list(map(self.get_image_data, self.validation_set))) labels = np.array(list(map(lambda e: e[2], self.validation_set))) return data,", "March 26th 2018 Author: Huisama ----- Last Modified: Saturday March 31st 2018 11:08:21", "full_width, full_height = 0, 0 count = 0 def dummy(self, dir, file): nonlocal", "pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set) self.validation_set.extend(neg_validation_set) self.test_set = [] self.test_set.extend(pos_test_set) self.test_set.extend(neg_test_set) ''' Ergodic", ": pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size] neg_validation_set = self.neg_data[neg_train_size : neg_train_size +", "(width, height)) count += 1 self.lookup_dataset_dir(dummy) return full_width / count, full_height / count", "self.test_set.extend(neg_test_set) ''' Ergodic files in dataset dir ''' def lookup_dataset_dir(self, callback): for _,", "self.get_size(fullfile) full_width += width full_height += height print(\"%s, %s\" % (width, height)) count", "as np import PIL # STD_WIDTH = 667 # STD_HEIGHT = 83 STD_WIDTH", "filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir, file) width, height = self.get_size(fullfile) full_width", 
"dummy(self, dir, file): if file == 'dataset.txt': # open and read in with", "pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) # pos_test_size =", "width and height of a single image ''' def get_size(self, image_file_path): img =", "DataSet(object): def __init__(self, data_dir, batch_size): self.data_dir = data_dir self.batch_size = batch_size self.train_set_ratio =", "pos_train_size] pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size] pos_test_set = self.pos_data[pos_train_size + pos_validate_size", "plt ''' This class stands for dataset and provides data processing oparations '''", "dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.poscount += 1 else: self.neg_data.append(( os.path.join(self.data_dir, dir,", "data_dir, batch_size): self.data_dir = data_dir self.batch_size = batch_size self.train_set_ratio = 0.8 self.validate_set_ratio =", "neg_train_size = int(neg_total * self.train_set_ratio) neg_validate_size = int(neg_total * self.validate_set_ratio) # neg_test_size =", "org_neg_data)) neg_labels = list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data), np.array(pos_labels) '''", "%d\" % (self.negcount, self.poscount)) return True ''' Check if image has 4 channel", "fullfile = os.path.join(self.data_dir, dir, file) width, height = self.get_size(fullfile) full_width += width full_height", "self.pos_data.append(( os.path.join(self.data_dir, dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.poscount += 1 else: self.neg_data.append((", "obj.next_batch(8) # while done != True: # print(data[0][0].dtype) # data, labels, done =", "if img.shape[2] != 3: print(\"Wrong image: %d\", fullfile) self.lookup_dataset_dir(dummy) ''' Generate dataset after", "full_width += width full_height += height print(\"%s, %s\" % (width, 
height)) count +=", "int(pos_total * self.validate_set_ratio) # pos_test_size = pos_total - pos_train_size - pos_validate_size neg_total =", "e[2], org_pos_data)) neg_data = list(map(self.get_image_data, org_neg_data)) neg_labels = list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data)", "file) img = scm.imread(fullfile) if img.shape[2] != 3: print(\"Wrong image: %d\", fullfile) self.lookup_dataset_dir(dummy)", "scm import random import numpy as np import PIL # STD_WIDTH = 667", "as scm import random import numpy as np import PIL # STD_WIDTH =", "random_pos = batch_size - random_neg org_pos_data = [] org_neg_data = [] for _", "self.negcount = 0 def dummy(self, dir, file): if file == 'dataset.txt': # open", "of dataset ''' def get_data_mean_size(self): full_width, full_height = 0, 0 count = 0", "= 252 STD_HEIGHT = 40 import matplotlib.pyplot as plt ''' This class stands", "get_image_data(self, tp): image1, image2 = scm.imread(tp[0]), scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH))) newimg2", "= DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset() # data, labels =", "self.batch_size = batch_size self.train_set_ratio = 0.8 self.validate_set_ratio = 0.1 ''' Get mean width", "data ''' def get_image_data(self, tp): image1, image2 = scm.imread(tp[0]), scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1,", "= list(map(self.get_image_data, org_neg_data)) neg_labels = list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data),", "dataset ''' def generate_dataset(self): random.shuffle(self.neg_data) random.shuffle(self.pos_data) # total = len(self.data) pos_total = len(self.pos_data)", "splittext[1]), int(splittext[2]))) self.negcount += 1 self.lookup_dataset_dir(dummy) # print(\"negcount: %d, poscount: %d\" % (self.negcount,", "if int(splittext[2]) == 1: self.pos_data.append(( os.path.join(self.data_dir, dir, 
splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.poscount", "return img.shape[1], img.shape[0] ''' Load dataset ''' def load_dataset(self): self.neg_data = [] self.pos_data", "= int(pos_total * self.validate_set_ratio) # pos_test_size = pos_total - pos_train_size - pos_validate_size neg_total", "dir, splittext[1]), int(splittext[2]))) self.negcount += 1 self.lookup_dataset_dir(dummy) # print(\"negcount: %d, poscount: %d\" %", "= scm.imread(image_file_path) return img.shape[1], img.shape[0] ''' Load dataset ''' def load_dataset(self): self.neg_data =", "= int(pos_total * self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) # pos_test_size = pos_total", "data processing oparations ''' class DataSet(object): def __init__(self, data_dir, batch_size): self.data_dir = data_dir", "''' Get mean width and height of dataset ''' def get_data_mean_size(self): full_width, full_height", "labels ''' Get test dataset ''' def get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels", "dir, file): filename = os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir,", "STD_WIDTH = 252 STD_HEIGHT = 40 import matplotlib.pyplot as plt ''' This class", "= len(self.pos_data) pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) #", "get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels = np.array(list(map(lambda e: e[2], self.test_set))) return data,", "callback(self, dir, file) ''' Get iamge data ''' def get_image_data(self, tp): image1, image2", "e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data), np.array(pos_labels) ''' Get validation dataset '''", "(c) 2018 Hui ''' import os import scipy.misc as scm import random import", "40 import matplotlib.pyplot as plt ''' This class stands for dataset and 
provides", "# obj.load_dataset() # obj.generate_dataset() # data, labels = obj.next_batch(8) # while done !=", "%d, poscount: %d\" % (self.negcount, self.poscount)) return True ''' Check if image has", "oparations ''' class DataSet(object): def __init__(self, data_dir, batch_size): self.data_dir = data_dir self.batch_size =", "splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.poscount += 1 else: self.neg_data.append(( os.path.join(self.data_dir, dir, splittext[0]),", "poscount: %d\" % (self.negcount, self.poscount)) return True ''' Check if image has 4", "''' def generate_dataset(self): random.shuffle(self.neg_data) random.shuffle(self.pos_data) # total = len(self.data) pos_total = len(self.pos_data) pos_train_size", "= scm.imread(tp[0]), scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH))) newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH)))", "range(random_pos): org_pos_data.append(random.choice(self.pos_train_set)) for _ in range(random_neg): org_neg_data.append(random.choice(self.neg_train_set)) pos_data = list(map(self.get_image_data, org_pos_data)) pos_labels =", "count += 1 self.lookup_dataset_dir(dummy) return full_width / count, full_height / count ''' Get", "PIL # STD_WIDTH = 667 # STD_HEIGHT = 83 STD_WIDTH = 252 STD_HEIGHT", "''' Get test dataset ''' def get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels =", "dummy(self, dir, file): filename = os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir,", "= self.get_size(fullfile) full_width += width full_height += height print(\"%s, %s\" % (width, height))", "3: print(\"Wrong image: %d\", fullfile) self.lookup_dataset_dir(dummy) ''' Generate dataset after loading dataset '''", "neg_data = list(map(self.get_image_data, org_neg_data)) neg_labels = list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return", "import 
numpy as np import PIL # STD_WIDTH = 667 # STD_HEIGHT =", "= self.pos_data[pos_train_size : pos_train_size + pos_validate_size] pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total]", "- len(pos_test_set) for _ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set) self.validation_set.extend(neg_validation_set) self.test_set", "neg_test_size = neg_total - neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set = self.pos_data[0", "int(neg_total * self.validate_set_ratio) # neg_test_size = neg_total - neg_train_size - neg_validate_size self.batch_index =", "pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size] neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size]", "''' def get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels = np.array(list(map(lambda e: e[2], self.test_set)))", "obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset() # data, labels = obj.next_batch(8) # while done", "''' def get_size(self, image_file_path): img = scm.imread(image_file_path) return img.shape[1], img.shape[0] ''' Load dataset", "img_comb / 255.0 ''' Get a batch of dataset ''' def next_batch(self, batch_size):", "full_width, full_height, count filename = os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir,", "= int(neg_total * self.validate_set_ratio) # neg_test_size = neg_total - neg_train_size - neg_validate_size self.batch_index", "for dataset and provides data processing oparations ''' class DataSet(object): def __init__(self, data_dir,", "// 2 random_pos = batch_size - random_neg org_pos_data = [] org_neg_data = []", "dec = len(neg_test_set) - len(pos_test_set) for _ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set = []", "+= 1 else: self.neg_data.append(( os.path.join(self.data_dir, dir, 
splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.negcount +=", "in dataset dir ''' def lookup_dataset_dir(self, callback): for _, dirs, _ in os.walk(self.data_dir):", "org_pos_data = [] org_neg_data = [] for _ in range(random_pos): org_pos_data.append(random.choice(self.pos_train_set)) for _", "e[2], self.validation_set))) return data, labels ''' Get test dataset ''' def get_test_set(self): data", "batch_size - random_neg org_pos_data = [] org_neg_data = [] for _ in range(random_pos):", "img = scm.imread(fullfile) if img.shape[2] != 3: print(\"Wrong image: %d\", fullfile) self.lookup_dataset_dir(dummy) '''", "count filename = os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir, file)", "= 0 self.negcount = 0 def dummy(self, dir, file): if file == 'dataset.txt':", "Last Modified: Saturday March 31st 2018 11:08:21 pm Modified By: Huisama ----- Copyright", "if filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir, file) img = scm.imread(fullfile) if", "import random import numpy as np import PIL # STD_WIDTH = 667 #", "dir in dirs: for _, _, files in os.walk(os.path.join(self.data_dir, dir)): for file in", "total = len(self.data) pos_total = len(self.pos_data) pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size =", "dummy(self, dir, file): nonlocal full_width, full_height, count filename = os.path.splitext(file) if filename[1] ==", "def get_size(self, image_file_path): img = scm.imread(image_file_path) return img.shape[1], img.shape[0] ''' Load dataset '''", "= len(neg_test_set) - len(pos_test_set) for _ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set)", "= neg_total - neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set = self.pos_data[0 :", "''' def check_image_channels(self): def dummy(self, dir, file): filename = os.path.splitext(file) if 
filename[1] ==", "width full_height += height print(\"%s, %s\" % (width, height)) count += 1 self.lookup_dataset_dir(dummy)", "height)) count += 1 self.lookup_dataset_dir(dummy) return full_width / count, full_height / count '''", "dataset and provides data processing oparations ''' class DataSet(object): def __init__(self, data_dir, batch_size):", "dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.negcount += 1 self.lookup_dataset_dir(dummy) # print(\"negcount: %d,", "[] self.pos_data = [] self.poscount = 0 self.negcount = 0 def dummy(self, dir,", "self.validation_set.extend(neg_validation_set) self.test_set = [] self.test_set.extend(pos_test_set) self.test_set.extend(neg_test_set) ''' Ergodic files in dataset dir '''", "nonlocal full_width, full_height, count filename = os.path.splitext(file) if filename[1] == '.png': fullfile =", "/ count, full_height / count ''' Get width and height of a single", "self.lookup_dataset_dir(dummy) return full_width / count, full_height / count ''' Get width and height", "list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data), np.array(pos_labels) ''' Get validation dataset", "img.shape[0] ''' Load dataset ''' def load_dataset(self): self.neg_data = [] self.pos_data = []", "for _, dirs, _ in os.walk(self.data_dir): for dir in dirs: for _, _,", "''' Get iamge data ''' def get_image_data(self, tp): image1, image2 = scm.imread(tp[0]), scm.imread(tp[1])", "This class stands for dataset and provides data processing oparations ''' class DataSet(object):", "- len(pos_validation_set) for _ in range(dec): pos_validation_set.append(random.choice(self.pos_data)) dec = len(neg_test_set) - len(pos_test_set) for", "neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set = self.pos_data[0 : pos_train_size] pos_validation_set =", "def next_batch(self, batch_size): random_neg = batch_size // 2 random_pos = batch_size - 
random_neg", "pos_train_size - pos_validate_size neg_total = len(self.neg_data) neg_train_size = int(neg_total * self.train_set_ratio) neg_validate_size =", "org_neg_data = [] for _ in range(random_pos): org_pos_data.append(random.choice(self.pos_train_set)) for _ in range(random_neg): org_neg_data.append(random.choice(self.neg_train_set))", "pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size] pos_test_set = self.pos_data[pos_train_size + pos_validate_size :", "# print(\"negcount: %d, poscount: %d\" % (self.negcount, self.poscount)) return True ''' Check if", "random.shuffle(self.neg_data) random.shuffle(self.pos_data) # total = len(self.data) pos_total = len(self.pos_data) pos_train_size = int(pos_total *", "''' Get a batch of dataset ''' def next_batch(self, batch_size): random_neg = batch_size", "1 self.lookup_dataset_dir(dummy) return full_width / count, full_height / count ''' Get width and", "stands for dataset and provides data processing oparations ''' class DataSet(object): def __init__(self,", "True ''' Check if image has 4 channel ''' def check_image_channels(self): def dummy(self,", "read in with open(os.path.join(self.data_dir, dir, file)) as file: for line in file: newline", "= list(map(self.get_image_data, org_pos_data)) pos_labels = list(map(lambda e: e[2], org_pos_data)) neg_data = list(map(self.get_image_data, org_neg_data))", "np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH))) newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH))) # img_comb = np.hstack((newimg1, newimg2))[:,", "file: for line in file: newline = line.strip() splittext = newline.split('\\t') if int(splittext[2])", "= newline.split('\\t') if int(splittext[2]) == 1: self.pos_data.append(( os.path.join(self.data_dir, dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]),", "+= width full_height += height print(\"%s, %s\" % (width, height)) count += 1", "= np.array(list(map(self.get_image_data, self.test_set))) labels = 
np.array(list(map(lambda e: e[2], self.test_set))) return data, labels #", "org_pos_data)) pos_labels = list(map(lambda e: e[2], org_pos_data)) neg_data = list(map(self.get_image_data, org_neg_data)) neg_labels =", "def generate_dataset(self): random.shuffle(self.neg_data) random.shuffle(self.pos_data) # total = len(self.data) pos_total = len(self.pos_data) pos_train_size =", "os.walk(self.data_dir): for dir in dirs: for _, _, files in os.walk(os.path.join(self.data_dir, dir)): for", "np.array(pos_labels) ''' Get validation dataset ''' def get_validation_set(self): data = np.array(list(map(self.get_image_data, self.validation_set))) labels", "neg_labels = list(map(lambda e: e[2], org_neg_data)) pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data), np.array(pos_labels) ''' Get", ": neg_train_size + neg_validate_size] neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total] dec =", "len(self.pos_data) pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) # pos_test_size", "= 0 self.pos_train_set = self.pos_data[0 : pos_train_size] pos_validation_set = self.pos_data[pos_train_size : pos_train_size +", "# neg_test_size = neg_total - neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set =", "neg_validate_size] neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total] dec = len(neg_validation_set) - len(pos_validation_set)", "%s\" % (width, height)) count += 1 self.lookup_dataset_dir(dummy) return full_width / count, full_height", "self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) # pos_test_size = pos_total - pos_train_size -", "return np.array(pos_data), np.array(pos_labels) ''' Get validation dataset ''' def get_validation_set(self): data = np.array(list(map(self.get_image_data,", "labels = np.array(list(map(lambda e: e[2], self.test_set))) return data, labels # obj = DataSet('./Pic',", 
"self.batch_index = 0 self.pos_train_set = self.pos_data[0 : pos_train_size] pos_validation_set = self.pos_data[pos_train_size : pos_train_size", "e: e[2], self.test_set))) return data, labels # obj = DataSet('./Pic', 8) # obj.check_image_channels()", "self.validation_set))) return data, labels ''' Get test dataset ''' def get_test_set(self): data =", "in range(random_neg): org_neg_data.append(random.choice(self.neg_train_set)) pos_data = list(map(self.get_image_data, org_pos_data)) pos_labels = list(map(lambda e: e[2], org_pos_data))", "# STD_HEIGHT = 83 STD_WIDTH = 252 STD_HEIGHT = 40 import matplotlib.pyplot as", "def load_dataset(self): self.neg_data = [] self.pos_data = [] self.poscount = 0 self.negcount =", "* self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio) # pos_test_size = pos_total - pos_train_size", "self.test_set))) return data, labels # obj = DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset()", "len(neg_validation_set) - len(pos_validation_set) for _ in range(dec): pos_validation_set.append(random.choice(self.pos_data)) dec = len(neg_test_set) - len(pos_test_set)", "STD_WIDTH = 667 # STD_HEIGHT = 83 STD_WIDTH = 252 STD_HEIGHT = 40", "self.pos_train_set = self.pos_data[0 : pos_train_size] pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size] pos_test_set", "pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size] neg_validation_set", "(STD_HEIGHT, STD_WIDTH))) # img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis] img_comb = np.dstack((newimg1, newimg2))", "''' def get_image_data(self, tp): image1, image2 = scm.imread(tp[0]), scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT,", "+ neg_validate_size : neg_total] dec = len(neg_validation_set) - len(pos_validation_set) for _ in range(dec):", "len(pos_test_set) for _ in range(dec): 
pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set) self.validation_set.extend(neg_validation_set) self.test_set =", "= 667 # STD_HEIGHT = 83 STD_WIDTH = 252 STD_HEIGHT = 40 import", "= self.neg_data[neg_train_size : neg_train_size + neg_validate_size] neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total]", "newimg2)) return img_comb / 255.0 ''' Get a batch of dataset ''' def", "''' def next_batch(self, batch_size): random_neg = batch_size // 2 random_pos = batch_size -", "= np.array(list(map(lambda e: e[2], self.test_set))) return data, labels # obj = DataSet('./Pic', 8)", "self.pos_data[pos_train_size + pos_validate_size : pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size] neg_validation_set = self.neg_data[neg_train_size", "for file in files: callback(self, dir, file) ''' Get iamge data ''' def", "in dirs: for _, _, files in os.walk(os.path.join(self.data_dir, dir)): for file in files:", "# total = len(self.data) pos_total = len(self.pos_data) pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size", "255.0 ''' Get a batch of dataset ''' def next_batch(self, batch_size): random_neg =", "open(os.path.join(self.data_dir, dir, file)) as file: for line in file: newline = line.strip() splittext", "= self.pos_data[pos_train_size + pos_validate_size : pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size] neg_validation_set =", "has 4 channel ''' def check_image_channels(self): def dummy(self, dir, file): filename = os.path.splitext(file)", "pos_validate_size neg_total = len(self.neg_data) neg_train_size = int(neg_total * self.train_set_ratio) neg_validate_size = int(neg_total *", "data, labels # obj = DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset()", "generate_dataset(self): random.shuffle(self.neg_data) random.shuffle(self.pos_data) # total = len(self.data) pos_total = len(self.pos_data) 
pos_train_size = int(pos_total", "import os import scipy.misc as scm import random import numpy as np import", "Ergodic files in dataset dir ''' def lookup_dataset_dir(self, callback): for _, dirs, _", "for _ in range(dec): pos_validation_set.append(random.choice(self.pos_data)) dec = len(neg_test_set) - len(pos_test_set) for _ in", "[] for _ in range(random_pos): org_pos_data.append(random.choice(self.pos_train_set)) for _ in range(random_neg): org_neg_data.append(random.choice(self.neg_train_set)) pos_data =", "- pos_train_size - pos_validate_size neg_total = len(self.neg_data) neg_train_size = int(neg_total * self.train_set_ratio) neg_validate_size", "----- Last Modified: Saturday March 31st 2018 11:08:21 pm Modified By: Huisama -----", "for _ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set) self.validation_set.extend(neg_validation_set) self.test_set = []", "pos_data = list(map(self.get_image_data, org_pos_data)) pos_labels = list(map(lambda e: e[2], org_pos_data)) neg_data = list(map(self.get_image_data,", "image: %d\", fullfile) self.lookup_dataset_dir(dummy) ''' Generate dataset after loading dataset ''' def generate_dataset(self):", "# STD_WIDTH = 667 # STD_HEIGHT = 83 STD_WIDTH = 252 STD_HEIGHT =", "def dummy(self, dir, file): filename = os.path.splitext(file) if filename[1] == '.png': fullfile =", "pos_total = len(self.pos_data) pos_train_size = int(pos_total * self.train_set_ratio) pos_validate_size = int(pos_total * self.validate_set_ratio)", "def get_image_data(self, tp): image1, image2 = scm.imread(tp[0]), scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH)))", "matplotlib.pyplot as plt ''' This class stands for dataset and provides data processing", "full_height / count ''' Get width and height of a single image '''", "'.png': fullfile = os.path.join(self.data_dir, dir, file) width, height = self.get_size(fullfile) full_width += 
width", "file) width, height = self.get_size(fullfile) full_width += width full_height += height print(\"%s, %s\"", "for _, _, files in os.walk(os.path.join(self.data_dir, dir)): for file in files: callback(self, dir,", "self.neg_data = [] self.pos_data = [] self.poscount = 0 self.negcount = 0 def", "self.poscount)) return True ''' Check if image has 4 channel ''' def check_image_channels(self):", "''' def lookup_dataset_dir(self, callback): for _, dirs, _ in os.walk(self.data_dir): for dir in", "4 channel ''' def check_image_channels(self): def dummy(self, dir, file): filename = os.path.splitext(file) if", "= os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir, file) width, height", "os.path.join(self.data_dir, dir, file) img = scm.imread(fullfile) if img.shape[2] != 3: print(\"Wrong image: %d\",", "img_comb = np.dstack((newimg1, newimg2)) return img_comb / 255.0 ''' Get a batch of", "self.neg_data[0 : neg_train_size] neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size] neg_test_set = self.neg_data[neg_train_size", "pos_test_size = pos_total - pos_train_size - pos_validate_size neg_total = len(self.neg_data) neg_train_size = int(neg_total", "pos_validation_set.append(random.choice(self.pos_data)) dec = len(neg_test_set) - len(pos_test_set) for _ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set =", "os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.negcount += 1 self.lookup_dataset_dir(dummy) # print(\"negcount: %d, poscount: %d\"", "dir, file) width, height = self.get_size(fullfile) full_width += width full_height += height print(\"%s,", "batch_size self.train_set_ratio = 0.8 self.validate_set_ratio = 0.1 ''' Get mean width and height", "as file: for line in file: newline = line.strip() splittext = newline.split('\\t') if", "''' Generate dataset after loading dataset ''' def generate_dataset(self): random.shuffle(self.neg_data) 
random.shuffle(self.pos_data) # total", "= batch_size - random_neg org_pos_data = [] org_neg_data = [] for _ in", "obj = DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset() # data, labels", "= 40 import matplotlib.pyplot as plt ''' This class stands for dataset and", "dir, file): nonlocal full_width, full_height, count filename = os.path.splitext(file) if filename[1] == '.png':", "_ in range(random_pos): org_pos_data.append(random.choice(self.pos_train_set)) for _ in range(random_neg): org_neg_data.append(random.choice(self.neg_train_set)) pos_data = list(map(self.get_image_data, org_pos_data))", "neg_total - neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set = self.pos_data[0 : pos_train_size]", "Get test dataset ''' def get_test_set(self): data = np.array(list(map(self.get_image_data, self.test_set))) labels = np.array(list(map(lambda", "line in file: newline = line.strip() splittext = newline.split('\\t') if int(splittext[2]) == 1:", "DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset() # data, labels = obj.next_batch(8)", "np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH))) # img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis] img_comb = np.dstack((newimg1,", "return img_comb / 255.0 ''' Get a batch of dataset ''' def next_batch(self,", "0 def dummy(self, dir, file): if file == 'dataset.txt': # open and read", "def check_image_channels(self): def dummy(self, dir, file): filename = os.path.splitext(file) if filename[1] == '.png':", "list(map(self.get_image_data, org_pos_data)) pos_labels = list(map(lambda e: e[2], org_pos_data)) neg_data = list(map(self.get_image_data, org_neg_data)) neg_labels", "(STD_HEIGHT, STD_WIDTH))) newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH))) # img_comb = np.hstack((newimg1, newimg2))[:, :,", "31st 2018 11:08:21 pm Modified By: Huisama ----- Copyright (c) 2018 Hui '''", "dir, file) img = 
scm.imread(fullfile) if img.shape[2] != 3: print(\"Wrong image: %d\", fullfile)", "pos_validate_size] pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total] self.neg_train_set = self.neg_data[0 : neg_train_size]", "full_height += height print(\"%s, %s\" % (width, height)) count += 1 self.lookup_dataset_dir(dummy) return", "''' Get width and height of a single image ''' def get_size(self, image_file_path):", "% (width, height)) count += 1 self.lookup_dataset_dir(dummy) return full_width / count, full_height /", "= int(neg_total * self.train_set_ratio) neg_validate_size = int(neg_total * self.validate_set_ratio) # neg_test_size = neg_total", "2 random_pos = batch_size - random_neg org_pos_data = [] org_neg_data = [] for", "= line.strip() splittext = newline.split('\\t') if int(splittext[2]) == 1: self.pos_data.append(( os.path.join(self.data_dir, dir, splittext[0]),", "[] self.poscount = 0 self.negcount = 0 def dummy(self, dir, file): if file", "= data_dir self.batch_size = batch_size self.train_set_ratio = 0.8 self.validate_set_ratio = 0.1 ''' Get", "- pos_validate_size neg_total = len(self.neg_data) neg_train_size = int(neg_total * self.train_set_ratio) neg_validate_size = int(neg_total", "img = scm.imread(image_file_path) return img.shape[1], img.shape[0] ''' Load dataset ''' def load_dataset(self): self.neg_data", "of dataset ''' def next_batch(self, batch_size): random_neg = batch_size // 2 random_pos =", "Author: Huisama ----- Last Modified: Saturday March 31st 2018 11:08:21 pm Modified By:", "for line in file: newline = line.strip() splittext = newline.split('\\t') if int(splittext[2]) ==", "os.path.join(self.data_dir, dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.poscount += 1 else: self.neg_data.append(( os.path.join(self.data_dir,", ": pos_train_size] pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size] pos_test_set = self.pos_data[pos_train_size +", "# 
obj = DataSet('./Pic', 8) # obj.check_image_channels() # obj.load_dataset() # obj.generate_dataset() # data,", "self.neg_data[neg_train_size : neg_train_size + neg_validate_size] neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total] dec", "dir, file) ''' Get iamge data ''' def get_image_data(self, tp): image1, image2 =", "with open(os.path.join(self.data_dir, dir, file)) as file: for line in file: newline = line.strip()", "img.shape[1], img.shape[0] ''' Load dataset ''' def load_dataset(self): self.neg_data = [] self.pos_data =", "def get_validation_set(self): data = np.array(list(map(self.get_image_data, self.validation_set))) labels = np.array(list(map(lambda e: e[2], self.validation_set))) return", "+ neg_validate_size] neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total] dec = len(neg_validation_set) -", "= np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH))) # img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis] img_comb =", "= len(neg_validation_set) - len(pos_validation_set) for _ in range(dec): pos_validation_set.append(random.choice(self.pos_data)) dec = len(neg_test_set) -", "dir, splittext[1]), int(splittext[2]))) self.poscount += 1 else: self.neg_data.append(( os.path.join(self.data_dir, dir, splittext[0]), os.path.join(self.data_dir, dir,", "def get_data_mean_size(self): full_width, full_height = 0, 0 count = 0 def dummy(self, dir,", "self.poscount = 0 self.negcount = 0 def dummy(self, dir, file): if file ==", "self.validate_set_ratio) # neg_test_size = neg_total - neg_train_size - neg_validate_size self.batch_index = 0 self.pos_train_set", "def dummy(self, dir, file): if file == 'dataset.txt': # open and read in", "= np.dstack((newimg1, newimg2)) return img_comb / 255.0 ''' Get a batch of dataset", "dir, file): if file == 'dataset.txt': # open and read in with open(os.path.join(self.data_dir,", "''' Get validation dataset ''' def get_validation_set(self): data = 
np.array(list(map(self.get_image_data, self.validation_set))) labels =", "Get validation dataset ''' def get_validation_set(self): data = np.array(list(map(self.get_image_data, self.validation_set))) labels = np.array(list(map(lambda", "= [] self.test_set.extend(pos_test_set) self.test_set.extend(neg_test_set) ''' Ergodic files in dataset dir ''' def lookup_dataset_dir(self,", "numpy as np import PIL # STD_WIDTH = 667 # STD_HEIGHT = 83", "0 count = 0 def dummy(self, dir, file): nonlocal full_width, full_height, count filename", "scm.imread(tp[1]) newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH))) newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH))) # img_comb", "self.test_set))) labels = np.array(list(map(lambda e: e[2], self.test_set))) return data, labels # obj =", "full_height, count filename = os.path.splitext(file) if filename[1] == '.png': fullfile = os.path.join(self.data_dir, dir,", "pos_data.extend(neg_data) pos_labels.extend(neg_labels) return np.array(pos_data), np.array(pos_labels) ''' Get validation dataset ''' def get_validation_set(self): data", "* self.train_set_ratio) neg_validate_size = int(neg_total * self.validate_set_ratio) # neg_test_size = neg_total - neg_train_size", "_ in range(dec): pos_test_set.append(random.choice(self.pos_data)) self.validation_set = [] self.validation_set.extend(pos_validation_set) self.validation_set.extend(neg_validation_set) self.test_set = [] self.test_set.extend(pos_test_set)", "__init__(self, data_dir, batch_size): self.data_dir = data_dir self.batch_size = batch_size self.train_set_ratio = 0.8 self.validate_set_ratio", "1 else: self.neg_data.append(( os.path.join(self.data_dir, dir, splittext[0]), os.path.join(self.data_dir, dir, splittext[1]), int(splittext[2]))) self.negcount += 1", "int(neg_total * self.train_set_ratio) neg_validate_size = int(neg_total * self.validate_set_ratio) # neg_test_size = neg_total -", "neg_validate_size self.batch_index = 0 self.pos_train_set = 
'''
File: \resource.py
Project: NumberRecongization
Created Date: Monday March 26th 2018
Author: Huisama
-----
Last Modified: Saturday March 31st 2018 11:08:21 pm
Modified By: Huisama
-----
Copyright (c) 2018 Hui
'''

import os
import random

import numpy as np

# scipy.misc.imread / scipy.misc.imresize were deprecated in SciPy 1.0 and
# the scipy.misc module was removed in SciPy >= 1.12.  The import is guarded
# so the split/batch bookkeeping still works without it; the image-loading
# methods then fail loudly on first use.  TODO: migrate to PIL.Image.
try:
    import scipy.misc as scm
except ImportError:
    scm = None

# PIL is imported by the original module but never used by DataSet itself;
# keep it optional so the module imports without the dependency installed.
try:
    import PIL  # noqa: F401
except ImportError:
    PIL = None

# Target size every image is rescaled to before being stacked into a sample.
# STD_WIDTH = 667
# STD_HEIGHT = 83
STD_WIDTH = 252
STD_HEIGHT = 40

# matplotlib is likewise unused by DataSet; made optional for the same reason.
try:
    import matplotlib.pyplot as plt  # noqa: F401
except ImportError:
    plt = None


'''
    This class stands for dataset and provides data processing oparations
'''
class DataSet(object):
    '''
    Image-pair dataset loader with train/validation/test splitting and
    random batching.

    Expected layout: <data_dir>/<subdir>/dataset.txt, where every line is
    "<image1>\t<image2>\t<label>"; label 1 marks a positive pair, any other
    value a negative pair.  Samples are (image1_path, image2_path, label)
    tuples throughout.
    '''

    def __init__(self, data_dir, batch_size):
        self.data_dir = data_dir
        self.batch_size = batch_size
        # 80% train / 10% validation per class; the remaining ~10% is test.
        self.train_set_ratio = 0.8
        self.validate_set_ratio = 0.1

    '''
        Get mean width and height of dataset
    '''
    def get_data_mean_size(self):
        '''
        Return the mean (width, height) over all .png files in the dataset.
        Returns (0, 0) when no .png file is found.
        '''
        full_width, full_height = 0, 0
        count = 0

        def accumulate(self, subdir, fname):
            nonlocal full_width, full_height, count
            if os.path.splitext(fname)[1] == '.png':
                path = os.path.join(self.data_dir, subdir, fname)
                width, height = self.get_size(path)
                full_width += width
                full_height += height
                print("%s, %s" % (width, height))
                count += 1

        self.lookup_dataset_dir(accumulate)
        if count == 0:
            # BUG FIX: the original divided unconditionally and raised
            # ZeroDivisionError on a dataset containing no .png files.
            return 0, 0
        return full_width / count, full_height / count

    '''
        Get width and height of a single image
    '''
    def get_size(self, image_file_path):
        '''
        Return (width, height) of a single image file.
        '''
        img = scm.imread(image_file_path)
        return img.shape[1], img.shape[0]

    '''
        Load dataset
    '''
    def load_dataset(self):
        '''
        Read every dataset.txt manifest under data_dir into self.pos_data
        and self.neg_data as (image1_path, image2_path, label) tuples.
        '''
        self.neg_data = []
        self.pos_data = []
        self.poscount = 0
        self.negcount = 0

        def parse_manifest(self, subdir, fname):
            if fname != 'dataset.txt':
                return
            # open and read in
            with open(os.path.join(self.data_dir, subdir, fname)) as manifest:
                for line in manifest:
                    fields = line.strip().split('\t')
                    record = (
                        os.path.join(self.data_dir, subdir, fields[0]),
                        os.path.join(self.data_dir, subdir, fields[1]),
                        int(fields[2]))
                    if record[2] == 1:
                        self.pos_data.append(record)
                        self.poscount += 1
                    else:
                        self.neg_data.append(record)
                        self.negcount += 1

        self.lookup_dataset_dir(parse_manifest)
        # print("negcount: %d, poscount: %d" % (self.negcount, self.poscount))
        return True

    '''
        Check that images have exactly 3 (RGB) channels
    '''
    def check_image_channels(self):
        '''
        Scan all .png files and report any whose channel count is not 3.
        (The original comment said "4 channel", but the code checks != 3.)
        '''
        def check(self, subdir, fname):
            if os.path.splitext(fname)[1] == '.png':
                path = os.path.join(self.data_dir, subdir, fname)
                img = scm.imread(path)
                if img.shape[2] != 3:
                    # BUG FIX: original was print("Wrong image: %d", path) —
                    # the format string was never applied (comma, not %) and
                    # %d is wrong for a path string; use %-formatting with %s.
                    print("Wrong image: %s" % path)

        self.lookup_dataset_dir(check)

    '''
        Generate dataset after loading dataset
    '''
    def generate_dataset(self):
        '''
        Shuffle the loaded data and split each class into train / validation
        / test subsets per train_set_ratio and validate_set_ratio.  Must be
        called after load_dataset().
        '''
        random.shuffle(self.neg_data)
        random.shuffle(self.pos_data)

        pos_total = len(self.pos_data)
        pos_train_size = int(pos_total * self.train_set_ratio)
        pos_validate_size = int(pos_total * self.validate_set_ratio)

        neg_total = len(self.neg_data)
        neg_train_size = int(neg_total * self.train_set_ratio)
        neg_validate_size = int(neg_total * self.validate_set_ratio)

        self.batch_index = 0

        self.pos_train_set = self.pos_data[0 : pos_train_size]
        pos_validation_set = self.pos_data[pos_train_size : pos_train_size + pos_validate_size]
        pos_test_set = self.pos_data[pos_train_size + pos_validate_size : pos_total]

        self.neg_train_set = self.neg_data[0 : neg_train_size]
        neg_validation_set = self.neg_data[neg_train_size : neg_train_size + neg_validate_size]
        neg_test_set = self.neg_data[neg_train_size + neg_validate_size : neg_total]

        # Balance the evaluation splits: pad the positive side with random
        # positive samples until it matches the negative side.
        # NOTE(review): padding draws from the *whole* pos_data pool, so a
        # training sample can leak into validation/test — confirm intended.
        dec = len(neg_validation_set) - len(pos_validation_set)
        for _ in range(dec):
            pos_validation_set.append(random.choice(self.pos_data))

        dec = len(neg_test_set) - len(pos_test_set)
        for _ in range(dec):
            pos_test_set.append(random.choice(self.pos_data))

        self.validation_set = []
        self.validation_set.extend(pos_validation_set)
        self.validation_set.extend(neg_validation_set)

        self.test_set = []
        self.test_set.extend(pos_test_set)
        self.test_set.extend(neg_test_set)

    '''
        Ergodic files in dataset dir
    '''
    def lookup_dataset_dir(self, callback):
        '''
        Walk the dataset directory, invoking callback(self, subdir, fname)
        for every file found inside each subdirectory.
        NOTE(review): directory names found at any walk depth are joined
        directly onto data_dir, so the layout is assumed to be exactly one
        level of subdirectories — confirm before nesting deeper.
        '''
        for _, dirs, _ in os.walk(self.data_dir):
            for subdir in dirs:
                for _, _, files in os.walk(os.path.join(self.data_dir, subdir)):
                    for fname in files:
                        callback(self, subdir, fname)

    '''
        Get iamge data
    '''
    def get_image_data(self, tp):
        '''
        Load the image pair (tp[0], tp[1]), resize both to STD_HEIGHT x
        STD_WIDTH, stack them along the channel axis and scale into [0, 1].
        '''
        image1, image2 = scm.imread(tp[0]), scm.imread(tp[1])
        newimg1 = np.array(scm.imresize(image1, (STD_HEIGHT, STD_WIDTH)))
        newimg2 = np.array(scm.imresize(image2, (STD_HEIGHT, STD_WIDTH)))
        # img_comb = np.hstack((newimg1, newimg2))[:, :, np.newaxis]
        img_comb = np.dstack((newimg1, newimg2))
        return img_comb / 255.0

    '''
        Get a batch of dataset
    '''
    def next_batch(self, batch_size):
        '''
        Return (data, labels) for one random training batch, half drawn from
        the positive train set and half from the negative train set.
        '''
        random_neg = batch_size // 2
        random_pos = batch_size - random_neg
        org_pos_data = [random.choice(self.pos_train_set) for _ in range(random_pos)]
        org_neg_data = [random.choice(self.neg_train_set) for _ in range(random_neg)]

        pos_data = list(map(self.get_image_data, org_pos_data))
        pos_labels = [e[2] for e in org_pos_data]
        neg_data = list(map(self.get_image_data, org_neg_data))
        neg_labels = [e[2] for e in org_neg_data]

        pos_data.extend(neg_data)
        pos_labels.extend(neg_labels)
        return np.array(pos_data), np.array(pos_labels)

    '''
        Get validation dataset
    '''
    def get_validation_set(self):
        '''
        Return (data, labels) arrays for the whole validation split.
        '''
        data = np.array(list(map(self.get_image_data, self.validation_set)))
        labels = np.array([e[2] for e in self.validation_set])
        return data, labels

    '''
        Get test dataset
    '''
    def get_test_set(self):
        '''
        Return (data, labels) arrays for the whole test split.
        '''
        data = np.array(list(map(self.get_image_data, self.test_set)))
        labels = np.array([e[2] for e in self.test_set])
        return data, labels


# obj = DataSet('./Pic', 8)
# obj.check_image_channels()
# obj.load_dataset()
# obj.generate_dataset()
# data, labels = obj.next_batch(8)
# while done != True:
#     print(data[0][0].dtype)
#     data, labels, done = obj.next_batch()
[ "low < high: max_area = max(max_area, (high - low) * min(height[low], height[high])) if", "max_area = 0 while low < high: max_area = max(max_area, (high - low)", "len(height) - 1 max_area = 0 while low < high: max_area = max(max_area,", "#!/usr/bin/env python #coding: utf-8 class Solution: # @return an integer def maxArea(self, height):", "high: max_area = max(max_area, (high - low) * min(height[low], height[high])) if height[low] <", "class Solution: # @return an integer def maxArea(self, height): low, high = 0,", "#coding: utf-8 class Solution: # @return an integer def maxArea(self, height): low, high", "< high: max_area = max(max_area, (high - low) * min(height[low], height[high])) if height[low]", "height[high])) if height[low] < height[high]: low += 1 else: high -= 1 return", "* min(height[low], height[high])) if height[low] < height[high]: low += 1 else: high -=", "if height[low] < height[high]: low += 1 else: high -= 1 return max_area", "0, len(height) - 1 max_area = 0 while low < high: max_area =", "an integer def maxArea(self, height): low, high = 0, len(height) - 1 max_area", "max_area = max(max_area, (high - low) * min(height[low], height[high])) if height[low] < height[high]:", "low) * min(height[low], height[high])) if height[low] < height[high]: low += 1 else: high", "0 while low < high: max_area = max(max_area, (high - low) * min(height[low],", "= 0 while low < high: max_area = max(max_area, (high - low) *", "min(height[low], height[high])) if height[low] < height[high]: low += 1 else: high -= 1", "= max(max_area, (high - low) * min(height[low], height[high])) if height[low] < height[high]: low", "- 1 max_area = 0 while low < high: max_area = max(max_area, (high", "def maxArea(self, height): low, high = 0, len(height) - 1 max_area = 0", "low, high = 0, len(height) - 1 max_area = 0 while low <", "maxArea(self, height): low, high = 0, len(height) - 1 max_area = 0 while", "while low < high: max_area = max(max_area, (high - low) * min(height[low], 
height[high]))", "max(max_area, (high - low) * min(height[low], height[high])) if height[low] < height[high]: low +=", "1 max_area = 0 while low < high: max_area = max(max_area, (high -", "(high - low) * min(height[low], height[high])) if height[low] < height[high]: low += 1", "@return an integer def maxArea(self, height): low, high = 0, len(height) - 1", "= 0, len(height) - 1 max_area = 0 while low < high: max_area", "- low) * min(height[low], height[high])) if height[low] < height[high]: low += 1 else:", "utf-8 class Solution: # @return an integer def maxArea(self, height): low, high =", "python #coding: utf-8 class Solution: # @return an integer def maxArea(self, height): low,", "integer def maxArea(self, height): low, high = 0, len(height) - 1 max_area =", "Solution: # @return an integer def maxArea(self, height): low, high = 0, len(height)", "# @return an integer def maxArea(self, height): low, high = 0, len(height) -", "high = 0, len(height) - 1 max_area = 0 while low < high:", "height): low, high = 0, len(height) - 1 max_area = 0 while low" ]
[ "passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id,", "[] iter_size = group_size if i != group_num - 1 else last_group for", "0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1} try: with xlrd.open_workbook(file)", "row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc()", "= excel.sheet_by_index(0) row_num = table.nrows - 1 group_size = 10000 group_num = row_num", "group_size if last_group != 0: group_num = group_num + 1 for i in", "dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except: return 0", "mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0)", "sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category,", "mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close()", "\"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10] != '' else -1, \"sku_type_id\":", "in range(group_num): records = [] iter_size = group_size if i != group_num -", "data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14])))", "from datetime import datetime, timedelta import traceback month_dict = {'JAN': 1, 'FEB': 2,", "row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), 
get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW", "as source: line = source.readline() while line and line.strip() != '': line =", "int(time_array[0]) + 12 if dt_array[2] == 'PM' and time_array[0] != '12' else int(time_array[0])", "= int(time_array[2]) return datetime(year, month, day, hour, minute, second) def read_from_excel_to_mysql(file): start =", "row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit()", "8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict = {'13': 2013,", "') date_array = dt_array[0].split('-') day = int(date_array[0]) month = month_dict[date_array[1]] year = year_dict[date_array[2]]", "+ i * group_size + 1 row_v = table.row_values(index) record = { \"owner\":", "get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except", "= False if not ignore and count % 1000000 == 0: mysqldb.commit() else:", "print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array", "repo, year, province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty,", "start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor()", "data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False if not", "'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP':", "date_array = dt_array[0].split('-') day = int(date_array[0]) month = month_dict[date_array[1]] year = 
year_dict[date_array[2]] time_array", "print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10],", "= source.readline() while line and line.strip() != '': line = source.readline() if line", "group_num - 1 else last_group for j in range(iter_size): index = j +", "dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-') day = int(date_array[0]) month = month_dict[date_array[1]]", "global_focus_name, qty) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "read_from_excel_to_mysql_11(file, year): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor", "sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color, sku_size,", "int(mdy[1])) def get_int(original): try: return int(original) except: return 0 def get_float(original): try: return", "row_v[10] != '' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1,", "int(original) - 41758 return datetime(2014, 4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy =", "'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict", "datetime, timedelta import traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR':", "pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year):", "table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5],", "= j + i * group_size + 1 row_v = table.row_values(index) record =", "month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN':", "requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s,", "0 with open(file, mode='r', 
encoding='utf-8') as source: line = source.readline() while line and", "timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try:", "finally: mysqldb.close() def getsqldatestr(original): delta = int(original) - 41758 return datetime(2014, 4, 29)", "second = int(time_array[2]) return datetime(year, month, day, hour, minute, second) def read_from_excel_to_mysql(file): start", "mysqldb.cursor() sql = '''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color, sku_size,", "row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\")", "finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\")", "global_category, descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s,", "row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{}", "'' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk", "records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ')", "= 10000 group_num = row_num // group_size last_group = row_num - group_num *", "if row_v[11] != '' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index,", "in range(start, table.nrows): row_v = 
table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3],", "\"city\": row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7], \"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if", "= source.readline() if line and line.strip() != '': count += 1 ignore =", "import pymongo import pymysql from datetime import datetime, timedelta import traceback month_dict =", "inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\",", "traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0):", "getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]), row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]),", "MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-') day", "sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s,", "1 group_size = 10000 group_num = row_num // group_size last_group = row_num -", "sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty) values(%s, %s, %s, %s, %s, %s, %s,", "province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp,", "== 'PM' and time_array[0] != '12' else int(time_array[0]) minute = int(time_array[1]) second =", "!= '': count += 1 ignore = True print(\"{}: {}\".format(count, line)) if count", "name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, 
total_amt) values(%s,", "return 0 def get_float(original): try: return float(original) except: return 0 if __name__ ==", "get_int(original): try: return int(original) except: return 0 def get_float(original): try: return float(original) except:", "%s, %s, %s)'''.format(year) try: count = 0 with open(file, mode='r', encoding='utf-8') as source:", "inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\",", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''.format(year) try:", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''.format(year) try: count", "1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7,", "if last_group != 0: group_num = group_num + 1 for i in range(group_num):", "-*- coding: utf-8 -*- import xlrd import pymongo import pymysql from datetime import", "coding: utf-8 -*- import xlrd import pymongo import pymysql from datetime import datetime,", "be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\",", "int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except: return 0 def get_float(original): try:", "2, 'KIDS': 3, '': -1} try: with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0)", "%s, %s, %s, %s)''' genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2,", "int(date_array[0]) month = month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0])", "\"sku_categories_id\": int(row_v[10]) if row_v[10] != '' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] !=", "to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-')", "= group_size if i 
!= group_num - 1 else last_group for j in", "try: return int(original) except: return 0 def get_float(original): try: return float(original) except: return", "datetime import datetime, timedelta import traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR':", "for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2],", "year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql", "dt_array[0].split('-') day = int(date_array[0]) month = month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.')", "excel: table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v)", "{}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array =", "write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array =", "descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s,", "city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty) values(%s, %s, %s,", "global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s, %s, %s, %s, %s,", "ignore and count % 1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except Exception:", "with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) row_num = table.nrows - 1 group_size", "try: with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows):", "%s, %s, %s, %s, %s, 
%s)'''.format(year) try: count = 0 with open(file, mode='r',", "print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array", "if line and line.strip() != '': count += 1 ignore = True print(\"{}:", "repo, province, city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty) values(%s,", "line.strip() != '': line = source.readline() if line and line.strip() != '': count", "mongo_client.db_test_01 with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) row_num = table.nrows - 1", "#!/usr/bin/env python # -*- coding: utf-8 -*- import xlrd import pymongo import pymysql", "province, city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage, msrp, sihouette,", "sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s,", "time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2] == 'PM' and", "sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s,", "row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError:", "mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\",", "- 1 group_size = 10000 group_num = row_num // group_size last_group = row_num", "charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province,", "genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) 
values(%s, %s, %s, %s,", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''.format(year)", "datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except: return 0 def get_float(original):", "ignore = True print(\"{}: {}\".format(count, line)) if count >= start: line = line.replace('?',", "cursor = mysqldb.cursor() sql = '''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province, city,", "int(time_array[0]) minute = int(time_array[1]) second = int(time_array[2]) return datetime(year, month, day, hour, minute,", "return datetime(year, month, day, hour, minute, second) def read_from_excel_to_mysql(file): start = 1 mysqldb", "1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1} try: with xlrd.open_workbook(file) as excel:", "import datetime, timedelta import traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3,", "return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except: return 0 def", "%s, %s, %s, %s, %s, %s, %s)''' genderage = {'MENS': 0, 'WOMENS': 1,", "jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt)", "second) def read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\")", "line.replace('?', '').replace('?', '') data = line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5],", "pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year,", "try: return float(original) except: return 0 if __name__ == \"__main__\": #read_from_excel_to_mysql_11(\"/Users/leon/Desktop/data/new/JORDAN双11发货分析_17.xlsx\", 17) 
read_from_txt_to_mysql(\"/Users/leon/Desktop/NIKE/NIKE17年数据.txt\",", "getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original)", "(getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12],", "mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor", "mysqldb.commit() else: break mysqldb.commit() except Exception: traceback.print_exc() finally: mysqldb.close() def getsqldatestr(original): delta =", "mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color,", "qty) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "%s, %s, %s, %s, %s)''' genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX':", "0: group_num = group_num + 1 for i in range(group_num): records = []", "- group_num * group_size if last_group != 0: group_num = group_num + 1", "as excel: table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex)", "inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally:", "= table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6], row_v[7], row_v[8],", "= table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7],", "year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2]", "= row_num // group_size last_group = row_num - group_num * group_size 
if last_group", "= dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except: return", "3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9,", "+ 12 if dt_array[2] == 'PM' and time_array[0] != '12' else int(time_array[0]) minute", "genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit()", "row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7], \"wms_sku_color_original\": row_v[8],", "group_num * group_size if last_group != 0: group_num = group_num + 1 for", "for i in range(group_num): records = [] iter_size = group_size if i !=", "NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1))", "sku_color, sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty) values(%s, %s, %s, %s, %s,", "-1, \"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13])", "discount_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "row_v[4], row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]), row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16],", "9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict = {'13': 2013, '14': 2014,", "0 def get_float(original): try: return float(original) except: return 0 if __name__ == \"__main__\":", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''' genderage =", "cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]),", "source: line = 
source.readline() while line and line.strip() != '': line = source.readline()", "global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s,", "into sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu,", "'''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division,", "except pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file,", "!= '' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\":", "= {'13': 2013, '14': 2014, '15': 2015, '16': 2016, '17': 2017} def read_from_excel_to_mongo(file):", "user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into sales_detail(shop, tb_order_id,", "mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-') day = int(date_array[0])", "data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False if not ignore", "mode='r', encoding='utf-8') as source: line = source.readline() while line and line.strip() != '':", "requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s,", "!= '': line = source.readline() if line and line.strip() != '': count +=", "41758 return datetime(2014, 4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return", "row_num // group_size last_group = row_num - group_num * group_size if last_group !=", "gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7], \"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9],", "= [] iter_size = 
group_size if i != group_num - 1 else last_group", "month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12 if", "count % 1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except Exception: traceback.print_exc() finally:", "group_num + 1 for i in range(group_num): records = [] iter_size = group_size", "import pymysql from datetime import datetime, timedelta import traceback month_dict = {'JAN': 1,", "2015, '16': 2016, '17': 2017} def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01", "pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time,", "7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict =", "into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division, gender_age,", "+ 1 for i in range(group_num): records = [] iter_size = group_size if", "%s, %s, %s, %s, %s, %s, %s)'''.format(year) try: count = 0 with open(file,", "'': line = source.readline() if line and line.strip() != '': count += 1", "row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7],", "table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql,", "= line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9],", "year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2] == 'PM'", "mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return int(original) except:", "= 
{'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1} try:", "as excel: table = excel.sheet_by_index(0) row_num = table.nrows - 1 group_size = 10000", "= mysqldb.cursor() sql = '''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name,", "row_v[10], get_int(row_v[11]), row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW", "user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into double_eleven_all(shop, repo,", "charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province,", "1 row_v = table.row_values(index) record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2],", "rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]),", "\"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) }", "row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5], \"sku_code\": row_v[6],", "= int(time_array[0]) + 12 if dt_array[2] == 'PM' and time_array[0] != '12' else", "total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "= row_num - group_num * group_size if last_group != 0: group_num = group_num", "line = source.readline() if line and line.strip() != '': count += 1 ignore", "xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v =", "True print(\"{}: {}\".format(count, line)) if count >= start: line = line.replace('?', '').replace('?', '')", "int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{} 
inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() mysqldb.commit()", "\"province\": row_v[4], \"city\": row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7], \"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\":", "excel: table = excel.sheet_by_index(0) row_num = table.nrows - 1 group_size = 10000 group_num", "delta = int(original) - 41758 return datetime(2014, 4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr):", "db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-') day =", "%s, %s, %s, %s)'''.format(year) try: count = 0 with open(file, mode='r', encoding='utf-8') as", "get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to", "-1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to", "sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s, %s, %s, %s,", "get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc()", "print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def", "'16': 2016, '17': 2017} def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with", "%s, %s, %s, %s, %s, %s, %s, %s)'''.format(year) try: count = 0 with", "int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\": 
row_v[12], \"quantity\": int(row_v[13]) } print(\"{}:", "city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price,", "table = excel.sheet_by_index(0) row_num = table.nrows - 1 group_size = 10000 group_num =", "data[13], get_int(data[14]))) ignore = False if not ignore and count % 1000000 ==", "+ timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original):", "xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) row_num = table.nrows - 1 group_size =", "row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records)", "traceback.print_exc() finally: mysqldb.close() def getsqldatestr(original): delta = int(original) - 41758 return datetime(2014, 4,", "row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{} inserted\".format(rowIndex))", "range(iter_size): index = j + i * group_size + 1 row_v = table.row_values(index)", "'' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\": row_v[12],", "minute = int(time_array[1]) second = int(time_array[2]) return datetime(year, month, day, hour, minute, second)", "import xlrd import pymongo import pymysql from datetime import datetime, timedelta import traceback", "'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict = {'13':", "global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s,", "1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql =", "def read_from_excel_to_mysql_11(file, 
def read_from_excel_to_mysql_11(file, year):
    """Import a 'double eleven' (Nov 11 sale) workbook into MySQL.

    Reads every data row of the first sheet of *file* (row 0 is the header)
    and inserts it into the double_eleven_all table, tagging each row with
    *year*. All rows are inserted in a single transaction committed at the
    end; on a DataError the transaction is NOT committed (the commit in the
    handler is deliberately commented out), so a bad row discards the batch.

    Args:
        file: Path to a workbook readable by xlrd.
        year: Sale year stored in the table's `year` column.

    NOTE(review): credentials are hard-coded with a '<PASSWORD>' placeholder
    — presumably replaced before running; verify deployment practice.
    """
    start = 1  # first data row; row 0 is the header
    mysqldb = pymysql.connect(host="localhost", user="root", passwd="<PASSWORD>", db="nike_sales", charset="utf8")
    cursor = mysqldb.cursor()
    # 18 columns / 18 placeholders; parameterized, so values are escaped by the driver.
    sql = '''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
    # Map the spreadsheet's gender/age labels to integer codes; '' -> -1 sentinel.
    genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1}
    try:
        with xlrd.open_workbook(file) as excel:
            table = excel.sheet_by_index(0)
            for rowIndex in range(start, table.nrows):
                row_v = table.row_values(rowIndex)
                print(row_v)
                # `year` is injected as the 3rd value; numeric cells go through
                # get_int/get_float so blanks degrade to 0 instead of raising.
                cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16])))
                print("ROW NO.{} inserted".format(rowIndex))
        # Single commit for the whole sheet.
        mysqldb.commit()
        print("Completed")
    except pymysql.err.DataError:
        # A malformed cell aborts the import; the failing row number is
        # reported so the run can be resumed manually.
        traceback.print_exc()
        #mysqldb.commit()
        print("ROW NO.{} to be inserted".format(rowIndex+1))
    finally:
        mysqldb.close()
charset=\"utf8\") cursor =", "start: line = line.replace('?', '').replace('?', '') data = line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2],", "get_int(row_v[11]), row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{}", "get_float(original): try: return float(original) except: return 0 if __name__ == \"__main__\": #read_from_excel_to_mysql_11(\"/Users/leon/Desktop/data/new/JORDAN双11发货分析_17.xlsx\", 17)", "print(\"{}: {}\".format(count, line)) if count >= start: line = line.replace('?', '').replace('?', '') data", "\"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close()", "get_float(row_v[14]), get_float(row_v[15]), get_float(row_v[16]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW", "price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]), row_v[12], row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17],", "into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu,", "mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start =", "else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] != '' else -1, \"supplier_code\": row_v[12], \"quantity\":", "data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore =", "'') data = line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), 
data[1], data[2], data[3], data[4], data[5], data[6], data[7],", "False if not ignore and count % 1000000 == 0: mysqldb.commit() else: break", "2017} def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file) as excel:", "else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write", "1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except Exception: traceback.print_exc() finally: mysqldb.close() def", "4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1]))", "int(time_array[2]) return datetime(year, month, day, hour, minute, second) def read_from_excel_to_mysql(file): start = 1", "group_size if i != group_num - 1 else last_group for j in range(iter_size):", "row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]), get_float(row_v[15]),", "row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]),", "platform_id, out_time, province, city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage,", "'PM' and time_array[0] != '12' else int(time_array[0]) minute = int(time_array[1]) second = int(time_array[2])", "read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor()", "dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2] == 'PM' and time_array[0] !=", "= table.nrows - 1 group_size = 10000 group_num = row_num // group_size last_group", "return float(original) except: return 0 if __name__ == 
\"__main__\": #read_from_excel_to_mysql_11(\"/Users/leon/Desktop/data/new/JORDAN双11发货分析_17.xlsx\", 17) read_from_txt_to_mysql(\"/Users/leon/Desktop/NIKE/NIKE17年数据.txt\", 2017,", "row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10] != '' else -1, \"sku_type_id\": int(row_v[11])", "mysqldb.close() def getsqldatestr(original): delta = int(original) - 41758 return datetime(2014, 4, 29) +", "except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file,", "%s)''' genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '':", "{'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL':", "city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage, msrp, sihouette, global_category_gender,", "'''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category,", "1 for i in range(group_num): records = [] iter_size = group_size if i", "group_num = row_num // group_size last_group = row_num - group_num * group_size if", "rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2],", "datetime(2014, 4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]),", "user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop,", "province, city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty) values(%s, %s,", "= int(time_array[1]) second = int(time_array[2]) return datetime(year, month, day, hour, minute, second) def", "row_v[7], \"wms_sku_color_original\": 
row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10] != '' else -1,", "data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False", "% 1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except Exception: traceback.print_exc() finally: mysqldb.close()", "print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def", "print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb =", "table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6],", "and count % 1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except Exception: traceback.print_exc()", "-1} try: with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex in range(start,", "be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb = pymysql.connect(host=\"localhost\",", "total_amt, discount_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "table.nrows - 1 group_size = 10000 group_num = row_num // group_size last_group =", "'''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code,", "db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo,", "genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s, %s,", "range(group_num): records = [] iter_size = group_size if i != group_num - 1", "row_v[5], \"sku_code\": row_v[6], \"bar_code\": row_v[7], 
\"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10]", "row_v[13], genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit()", "print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() #mysqldb.commit() print(\"ROW NO.{} to be", "'14': 2014, '15': 2015, '16': 2016, '17': 2017} def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient()", "= '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code, sku_color, sku_size, sku_name,", "day = int(date_array[0]) month = month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour", "= year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2] ==", ">= start: line = line.replace('?', '').replace('?', '') data = line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1],", "i in range(group_num): records = [] iter_size = group_size if i != group_num", "\"sku_code\": row_v[6], \"bar_code\": row_v[7], \"wms_sku_color_original\": row_v[8], \"wms_sku_size_original\": row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10] !=", "!= '' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record)", "line = source.readline() while line and line.strip() != '': line = source.readline() if", "mysqldb.commit() except Exception: traceback.print_exc() finally: mysqldb.close() def getsqldatestr(original): delta = int(original) - 41758", "6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}", "inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except 
# Excel's serial-date epoch: serial 0 corresponds to 1899-12-30 in the
# 1900 date system (the off-by-two from 1900-01-01 absorbs Excel's
# historical leap-year bug for dates after Feb 1900).
_EXCEL_EPOCH = datetime(1899, 12, 30)


def getsqldatestr(original):
    """Convert an Excel serial day number to a datetime.

    Args:
        original: Excel serial day number (int, float, or anything int()
            accepts); fractional day parts are truncated, as before.

    Returns:
        A datetime at midnight of the corresponding calendar day.

    Replaces the previous hard-coded anchor (serial 41758 == 2014-04-29)
    with the standard Excel epoch; 2014-04-29 is exactly 41758 days after
    1899-12-30, so results are identical for every input.
    """
    return _EXCEL_EPOCH + timedelta(days=int(original))
(getsqldatefromstr(data[0]),", "except: return 0 def get_float(original): try: return float(original) except: return 0 if __name__", "{ \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5],", "def getsqldatestr(original): delta = int(original) - 41758 return datetime(2014, 4, 29) + timedelta(days=delta)", "cursor = mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city,", "to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb =", "4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10,", "'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT':", "2016, '17': 2017} def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file)", "cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]),", "row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13], get_float(row_v[14]),", "line = line.replace('?', '').replace('?', '') data = line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3],", "i * group_size + 1 row_v = table.row_values(index) record = { \"owner\": row_v[0],", "count = 0 with open(file, mode='r', encoding='utf-8') as source: line = source.readline() while", "traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5,", "genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1}", "j + i * group_size + 1 row_v = table.row_values(index) record = {", "hour, minute, second) def read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", 
passwd=\"<PASSWORD>\",", "print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9],", "3, '': -1} try: with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex", "start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql =", "%s, %s, %s, %s, %s)'''.format(year) try: count = 0 with open(file, mode='r', encoding='utf-8')", "for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], year,", "else: break mysqldb.commit() except Exception: traceback.print_exc() finally: mysqldb.close() def getsqldatestr(original): delta = int(original)", "table.row_values(index) record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\":", "'NOV': 11, 'DEC': 12} year_dict = {'13': 2013, '14': 2014, '15': 2015, '16':", "def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def get_int(original): try: return", "order_id, repo, province, city, supplier_sku_code, sku_color, sku_size, sku_name, sap_division, gender_age, global_category_name, global_focus_name, qty)", "= group_num + 1 for i in range(group_num): records = [] iter_size =", "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "row_v = table.row_values(index) record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\":", "'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC':", "minute, second) def read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\",", "finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb = 
pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\",", "cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]),", "while line and line.strip() != '': line = source.readline() if line and line.strip()", "= mysqldb.cursor() sql = '''insert into sales_detail(shop, tb_order_id, platform_id, out_time, province, city, sku_color,", "to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\", user=\"root\",", "bu, global_category, descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s,", "sku_color, sku_size, outer_sku_id, jmsku_code, supplier_sku_code, bu, global_category, descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty,", "'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG':", "msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s, %s, %s, %s, %s, %s,", "month = month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) +", "%s, %s, %s, %s, %s, %s, %s, %s, %s)''' genderage = {'MENS': 0,", "db=\"nike_sales\", charset=\"utf8\") cursor = mysqldb.cursor() sql = '''insert into double_eleven_all(shop, repo, year, province,", "= mysqldb.cursor() sql = '''insert into not_deleven_all_{}(out_time, shop, order_id, repo, province, city, supplier_sku_code,", "year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11], get_int(row_v[12]), row_v[13],", "float(original) except: return 0 if __name__ == \"__main__\": #read_from_excel_to_mysql_11(\"/Users/leon/Desktop/data/new/JORDAN双11发货分析_17.xlsx\", 17) read_from_txt_to_mysql(\"/Users/leon/Desktop/NIKE/NIKE17年数据.txt\", 2017, 
5000001)", "12 if dt_array[2] == 'PM' and time_array[0] != '12' else int(time_array[0]) minute =", "return int(original) except: return 0 def get_float(original): try: return float(original) except: return 0", "and time_array[0] != '12' else int(time_array[0]) minute = int(time_array[1]) second = int(time_array[2]) return", "data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False if", "1 else last_group for j in range(iter_size): index = j + i *", "%s, %s)''' genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3,", "and line.strip() != '': line = source.readline() if line and line.strip() != '':", "db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) row_num = table.nrows", "%s, %s, %s)''' genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS':", "= int(original) - 41758 return datetime(2014, 4, 29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy", "not ignore and count % 1000000 == 0: mysqldb.commit() else: break mysqldb.commit() except", "getsqldatestr(original): delta = int(original) - 41758 return datetime(2014, 4, 29) + timedelta(days=delta) def", "dt_array[2] == 'PM' and time_array[0] != '12' else int(time_array[0]) minute = int(time_array[1]) second", "row_v[9], \"sku_categories_id\": int(row_v[10]) if row_v[10] != '' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11]", "def gen_datetime(date_time_str): dt_array = date_time_str.split(' ') date_array = dt_array[0].split('-') day = int(date_array[0]) month", "time_array[0] != '12' else int(time_array[0]) minute = int(time_array[1]) second = int(time_array[2]) return datetime(year,", "int(row_v[10]) if row_v[10] != '' else -1, \"sku_type_id\": int(row_v[11]) if row_v[11] != ''", "NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, year): start = 1 mysqldb", "1 ignore = True print(\"{}: 
{}\".format(count, line)) if count >= start: line =", "int(row_v[13]) } print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def", "def read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor", "!= 0: group_num = group_num + 1 for i in range(group_num): records =", "'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV':", "} print(\"{}: {}\".format(index, record.values())) records.append(record) print(\"Bulk write to MongoDB...\") db_test_01.source.insert_many(records) mongo_client.close() def gen_datetime(date_time_str):", "ignore = False if not ignore and count % 1000000 == 0: mysqldb.commit()", "line.split('\\t') cursor.execute(sql, (getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]),", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,", "2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8,", "iter_size = group_size if i != group_num - 1 else last_group for j", "\"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4], \"city\": row_v[5], \"sku_code\":", "= table.row_values(index) record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]),", "'': count += 1 ignore = True print(\"{}: {}\".format(count, line)) if count >=", "excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1],", "data[3], data[4], data[5], data[6], data[7], data[8], data[9], get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore", "genderage[row_v[14]], get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), 
def getsqldatestr(original):
    """Convert an Excel-style serial day number into a datetime.

    Serial day 41758 corresponds to 2014-04-29; other values are offset
    from that anchor date.
    """
    day_offset = int(original) - 41758
    anchor = datetime(2014, 4, 29)
    return anchor + timedelta(days=day_offset)
def getsqldatefromstr(dstr):
    """Parse a 'M/D/YY' date string into a datetime.

    Two-digit years are assumed to lie in 2000-2099.
    """
    month_str, day_str, year2_str = dstr.split('/')
    return datetime(2000 + int(year2_str), int(month_str), int(day_str))
pymongo import pymysql from", "+ 1 row_v = table.row_values(index) record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\":", "= dt_array[1].split('.') hour = int(time_array[0]) + 12 if dt_array[2] == 'PM' and time_array[0]", "- 1 else last_group for j in range(iter_size): index = j + i", "get_float(row_v[15]), row_v[16], row_v[17], int(row_v[18]), get_float(row_v[19]), get_float(row_v[20]), get_float(row_v[21]))) print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except", "excel.sheet_by_index(0) row_num = table.nrows - 1 group_size = 10000 group_num = row_num //", "source.readline() while line and line.strip() != '': line = source.readline() if line and", "= dt_array[0].split('-') day = int(date_array[0]) month = month_dict[date_array[1]] year = year_dict[date_array[2]] time_array =", "year): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor =", "(row_v[0], row_v[1], year, row_v[2], row_v[3], row_v[4], row_v[5], row_v[6], row_v[7], genderage[row_v[8]], row_v[9], get_int(row_v[10]), row_v[11],", "!= group_num - 1 else last_group for j in range(iter_size): index = j", "get_int(data[14]))) ignore = False if not ignore and count % 1000000 == 0:", "data[12], data[13], get_int(data[14]))) ignore = False if not ignore and count % 1000000", "if not ignore and count % 1000000 == 0: mysqldb.commit() else: break mysqldb.commit()", "= {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,", "{}\".format(count, line)) if count >= start: line = line.replace('?', '').replace('?', '') data =", "def get_int(original): try: return int(original) except: return 0 def get_float(original): try: return float(original)", "'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict = {'13': 2013, '14':", "traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_excel_to_mysql_11(file, 
def read_from_excel_to_mysql_11(file, year):
    """Load 'double eleven' (11.11) sales rows from an Excel sheet into MySQL.

    One insert per row into double_eleven_all; the supplied *year* is bound
    as the third column. Row 0 is treated as a header and skipped.
    """
    start = 1  # skip the header row
    mysqldb = pymysql.connect(host="localhost", user="root", passwd="<PASSWORD>", db="nike_sales", charset="utf8")
    cursor = mysqldb.cursor()
    sql = '''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'''
    # Maps the spreadsheet's gender/age label to its numeric code.
    genderage = {'MENS': 0, 'WOMENS': 1, 'ADULT UNISEX': 2, 'KIDS': 3, '': -1}
    try:
        with xlrd.open_workbook(file) as excel:
            sheet = excel.sheet_by_index(0)
            for row_index in range(start, sheet.nrows):
                row = sheet.row_values(row_index)
                print(row)
                params = (row[0], row[1], year, row[2], row[3], row[4], row[5],
                          row[6], row[7], genderage[row[8]], row[9],
                          get_int(row[10]), row[11], get_int(row[12]), row[13],
                          get_float(row[14]), get_float(row[15]), get_float(row[16]))
                cursor.execute(sql, params)
                print("ROW NO.{} inserted".format(row_index))
        mysqldb.commit()
        print("Completed")
    except pymysql.err.DataError:
        traceback.print_exc()
        #mysqldb.commit()
        print("ROW NO.{} to be inserted".format(row_index+1))
    finally:
        mysqldb.close()
def gen_datetime(date_time_str):
    """Parse a 'DD-MON-YY H.M.S AM/PM' timestamp into a datetime.

    Example input: '11-NOV-16 10.30.45 PM'. Month and two-digit-year tokens
    are resolved through the module-level month_dict / year_dict tables.
    """
    dt_array = date_time_str.split(' ')
    date_array = dt_array[0].split('-')
    day = int(date_array[0])
    month = month_dict[date_array[1]]
    year = year_dict[date_array[2]]
    time_array = dt_array[1].split('.')
    hour = int(time_array[0])
    meridiem = dt_array[2]
    # 12-hour -> 24-hour conversion. BUG FIX: the original left '12 AM'
    # as hour 12; midnight must map to hour 0.
    if meridiem == 'PM' and hour != 12:
        hour += 12
    elif meridiem == 'AM' and hour == 12:
        hour = 0
    minute = int(time_array[1])
    second = int(time_array[2])
    return datetime(year, month, day, hour, minute, second)
MongoDB...\")", "(row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4], row_v[5], row_v[6], row_v[7], row_v[8], row_v[9], row_v[10], get_int(row_v[11]), row_v[12],", "= pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) row_num", "datetime(year, month, day, hour, minute, second) def read_from_excel_to_mysql(file): start = 1 mysqldb =", "29) + timedelta(days=delta) def getsqldatefromstr(dstr): mdy = dstr.split('/') return datetime(2000+int(mdy[2]), int(mdy[0]), int(mdy[1])) def", "source.readline() if line and line.strip() != '': count += 1 ignore = True", "read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\", user=\"root\", passwd=\"<PASSWORD>\", db=\"nike_sales\", charset=\"utf8\") cursor =", "supplier_sku_code, bu, global_category, descriptio, genderage, msrp, sihouette, global_category_gender, requested_qty, price, total_amt, discount_amt) values(%s,", "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''' genderage = {'MENS':", "get_int(data[10]), get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False if not ignore and count", "'': -1} try: with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex in", "cursor = mysqldb.cursor() sql = '''insert into double_eleven_all(shop, repo, year, province, city, outer_sku_id,", "= month_dict[date_array[1]] year = year_dict[date_array[2]] time_array = dt_array[1].split('.') hour = int(time_array[0]) + 12", "Exception: traceback.print_exc() finally: mysqldb.close() def getsqldatestr(original): delta = int(original) - 41758 return datetime(2014,", "timedelta import traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4,", "outer_sku_id, name, sku_color, sku_size, genderage, global_category, bu, sihouette, requested_qty, global_category_gender, msrp, price, total_amt)", "'').replace('?', '') data = line.split('\\t') cursor.execute(sql, 
(getsqldatefromstr(data[0]), data[1], data[2], data[3], data[4], data[5], data[6],", "{'13': 2013, '14': 2014, '15': 2015, '16': 2016, '17': 2017} def read_from_excel_to_mongo(file): mongo_client", "NO.{} to be inserted\".format(rowIndex+1)) finally: mysqldb.close() def read_from_txt_to_mysql(file, year, start=0): mysqldb = pymysql.connect(host=\"localhost\",", "year_dict = {'13': 2013, '14': 2014, '15': 2015, '16': 2016, '17': 2017} def", "get_int(data[11]), data[12], data[13], get_int(data[14]))) ignore = False if not ignore and count %", "5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11,", "record = { \"owner\": row_v[0], \"code\": row_v[1], \"slip_code\": row_v[2], \"create_time\": gen_datetime(row_v[3]), \"province\": row_v[4],", "month, day, hour, minute, second) def read_from_excel_to_mysql(file): start = 1 mysqldb = pymysql.connect(host=\"localhost\",", "def read_from_excel_to_mongo(file): mongo_client = pymongo.MongoClient() db_test_01 = mongo_client.db_test_01 with xlrd.open_workbook(file) as excel: table", "'OCT': 10, 'NOV': 11, 'DEC': 12} year_dict = {'13': 2013, '14': 2014, '15':", "print(\"ROW NO.{} inserted\".format(rowIndex)) mysqldb.commit() print(\"Completed\") except pymysql.err.DataError: traceback.print_exc() mysqldb.commit() print(\"ROW NO.{} to be", "row_v[11] != '' else -1, \"supplier_code\": row_v[12], \"quantity\": int(row_v[13]) } print(\"{}: {}\".format(index, record.values()))", "# -*- coding: utf-8 -*- import xlrd import pymongo import pymysql from datetime", "with xlrd.open_workbook(file) as excel: table = excel.sheet_by_index(0) for rowIndex in range(start, table.nrows): row_v", "in range(start, table.nrows): row_v = table.row_values(rowIndex) print(row_v) cursor.execute(sql, (row_v[0], row_v[1], row_v[2], getsqldatestr(row_v[3]), row_v[4],", "import traceback month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY':", "last_group for j in range(iter_size): index = j + i * group_size +", "UNISEX': 2, 'KIDS': 3, '': 
-1} try: with xlrd.open_workbook(file) as excel: table =", "msrp, price, total_amt) values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s," ]
[ "length of the closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict", "own (pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def", "% db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store id as", "ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default # Reference def put_closest_reference(self, acc,", "getters and putters for # data shared between BAP services, so they're not", "def put_fastq_paths(self, paths): '''Stores the fastqs path as its own (pseudo) user input.'''", "put_user_plasmids(self, lst): '''Stores list of plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self,", "datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def", "end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time -", "'''Stores the fastqs path as its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def", "id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self, paths):", "BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root, this must be", "default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, 
default=None): return self.get_closest_reference({}).get('length', default)", "[])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) #", "return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes',", "# kcri.bap.data # # Defines the data structures that are shared across the", "self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st,", "self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret", "get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else default", "str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True) def get_cgmlsts(self): return", "value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' %", "pico.workflow.blackboard import Blackboard ### BAPBlackboard class # # Wraps the generic Blackboard with", "default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores a warning on", "\"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence", "path): '''Stores the contigs path as its own (pseudo) user input.''' self.put_user_input('contigs', path)", "get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def 
add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return", "the user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root') if not db_root: raise", "self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return", "% param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default) def", "default) def put_graph_path(self, path): '''Stores the path to the GFA file.''' self.put('bap/summary/graph', path)", "self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme,", "so they're not randomly grabbing around # in bags of untyped data. class", "to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def", "self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene):", "self.get_user_input('db_root') if not db_root: raise Exception(\"database root path is not set\") elif not", "return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store id as the sample", "set.''' db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database root path is not", "default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs path as its", "shared between BAP services, so they're not randomly grabbing around # in bags", "ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return 
ret if ret else default def add_pmlst(self,", "a warning on the 'bap' top level (note: use service warning instead).''' self.append_to('bap/warnings',", "reads def put_fastq_paths(self, paths): '''Stores the fastqs path as its own (pseudo) user", "list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default # Reference def put_closest_reference(self,", "lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True)", "sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return", "get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids", "= self.get_user_input('db_root') if not db_root: raise Exception(\"database root path is not set\") elif", "sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids specified by", "getters and putters specific to the shared data definitions in the current BAP.'''", "methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs',", "get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def", "path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path", "databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the 
user_input/db_root, this must be set.''' db_root", "get_db_root(self): '''Retrieve the user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root') if not", "default) def put_user_contigs_path(self, path): '''Stores the contigs path as its own (pseudo) user", "desc): '''Stores the accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc)", "sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return", "self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time", "put_assembled_contigs_path(self, path): '''Stores the path to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self,", "default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci,", "service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start'))", "st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene):", "with an API that adds getters and putters for # data shared between", "self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the", "str = \"%s[%s]\" % (st, ','.join(map(lambda l: 
'%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst',", "adds getters and putters for # data shared between BAP services, so they're", "path): '''Stores the path to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None):", "'%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True) def get_cgmlsts(self): return sorted(self.get('bap/summary/cgmlst', []))", "file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self,", "def put_user_plasmids(self, lst): '''Stores list of plasmids specified by user.''' self.put_user_input('plasmids', lst) def", "default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def", "an API that adds getters and putters for # data shared between BAP", "def put_user_contigs_path(self, path): '''Stores the contigs path as its own (pseudo) user input.'''", "the accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self,", "sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if", "data definitions in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods", "# BAP-level methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start',", "add_warning(self, warning): '''Stores a warning on the 'bap' top level (note: use service", "def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def 
add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self):", "def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) #", "return ret if ret else default # Reference def put_closest_reference(self, acc, desc): '''Stores", "and putters for # data shared between BAP services, so they're not randomly", "path, length, or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path',", "paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs", "closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the path to", "datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value):", "elif not os.path.isdir(db_root): raise Exception(\"db root path is not a directory: %s\" %", "#!/usr/bin/env python3 # # kcri.bap.data # # Defines the data structures that are", "def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self,", "default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path to the computed", "### BAPBlackboard class # # Wraps the generic Blackboard with an API that", "paths): '''Stores the fastqs path as its own (pseudo) user input.''' 
self.put_user_input('fastqs', paths)", "def put_db_root(self, path): '''Stores the root of the BAP services databases.''' self.put_user_input('db_root', path)", "computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path):", "[])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def", "self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst):", "species specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def", "class # # Wraps the generic Blackboard with an API that adds getters", "MLST def add_mlst(self, st, loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l:", "path): '''Stores the path to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None):", "get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def", "return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations',", "enum from datetime import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class #", "= list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default # Reference def", "% (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # 
Virulence def", "not db_root: raise Exception(\"database root path is not set\") elif not os.path.isdir(db_root): raise", "def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st, pct): str", "def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self):", "default=None): '''Returns dict with fields accession, name, path, length, or the default.''' return", "user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database", "default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default))", "return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes',", "return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return", "set\") elif not os.path.isdir(db_root): raise Exception(\"db root path is not a directory: %s\"", "True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene,", "def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self,", "the root of the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the", "# Contigs and reads def 
put_fastq_paths(self, paths): '''Stores the fastqs path as its", "self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid,", "end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self,", "mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st,", "ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else default def add_pmlst(self, profile, st): str", "from datetime import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class # #", "be set.''' db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database root path is", "if ret else default # Reference def put_closest_reference(self, acc, desc): '''Stores the accession", "def add_pmlst(self, profile, st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def", "% l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) #", "= \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str,", "default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret", "the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self,", "\"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' % l, 
zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True)", "self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path to the computed contigs.''' self.put('bap/summary/contigs',", "# MLST def add_mlst(self, st, loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda", "its own (pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default)", "the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service,", "length, or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default)", "'''Stores the accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def", "acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the path to the closest reference", "self.append_to('bap/warnings', warning) # Standard methods for BAP common data def put_db_root(self, path): '''Stores", "return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret", "definitions in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def", "'''Adds to the generic Blackboard getters and putters specific to the shared data", "default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None):", "Species def put_user_species(self, lst): '''Stores list of species specified by user.''' 
self.put_user_input('species', lst)", "self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs path as its own (pseudo)", "lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret =", "'''Stores the length of the closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None):", "list of species specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species',", "accession, name, path, length, or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None):", "user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species',", "return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs path as its own", "services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root, this must be set.'''", "lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True)", "id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs", "True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def", "path) def get_db_root(self): '''Retrieve the user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root')", "as its own (pseudo) user input.''' self.put_user_input('contigs', path) def 
get_user_contigs_path(self, default=None): return self.get_user_input('contigs',", "return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci, alleles): str = \"%s[%s]\"", "the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads", "is not a directory: %s\" % db_root) return os.path.abspath(db_root) # Sample ID def", "path as its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return", "root of the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root,", "genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with fields accession, name, path,", "sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', []))", "plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def", "def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path to", "Blackboard with an API that adds getters and putters for # data shared", "path to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default)", "self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci, alleles): str = \"%s[%s]\" %", "[])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes',", "the 
path to the closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores", "# Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', []))", "st, pct): str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True) def", "input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores", "(pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self,", "default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return", "Reference def put_closest_reference(self, acc, desc): '''Stores the accession and description of closest reference.'''", "the 'bap' top level (note: use service warning instead).''' self.append_to('bap/warnings', warning) # Standard", "BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service, version, user_inputs):", "# Species def put_user_species(self, lst): '''Stores list of species specified by user.''' self.put_user_input('species',", "'''Stores list of plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return", "def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def", "get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs path 
as", "# # Defines the data structures that are shared across the BAP services.", "common data def put_db_root(self, path): '''Stores the root of the BAP services databases.'''", "the path to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs',", "ID def put_sample_id(self, id): '''Store id as the sample id in the summary.'''", "directory: %s\" % db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store", "reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the path to the", "the closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length of", "profile, st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return", "shared across the BAP services. 
# import os, enum from datetime import datetime", "self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length',", "def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci, alleles):", "import Blackboard ### BAPBlackboard class # # Wraps the generic Blackboard with an", "def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def", "Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids specified by user.''' self.put_user_input('plasmids', lst)", "(note: use service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP common", "def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST", "user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids',", "sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' %", "alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles))))", "param, default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores a warning", "of untyped data. 
class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and putters", "put_sample_id(self, id): '''Store id as the sample id in the summary.''' self.put('bap/summary/sample_id', id)", "put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param, default=None): return", "path is not a directory: %s\" % db_root) return os.path.abspath(db_root) # Sample ID", "or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def", "def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else", "not a directory: %s\" % db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self,", "return ret if ret else default def add_pmlst(self, profile, st): str = \"%s%s\"", "fastqs path as its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None):", "putters for # data shared between BAP services, so they're not randomly grabbing", "Wraps the generic Blackboard with an API that adds getters and putters for", "as its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs',", "Exception(\"database root path is not set\") elif not os.path.isdir(db_root): raise Exception(\"db root path", "def add_warning(self, warning): '''Stores a warning on the 'bap' top level (note: use", "# Standard methods for BAP common data def put_db_root(self, path): '''Stores the root", "- start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value)", "def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): 
self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self):", "self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default)", "add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def", "self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst):", "not set\") elif not os.path.isdir(db_root): raise Exception(\"db root path is not a directory:", "'''Stores the path to the closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length):", "in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and", "sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids',", "gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self,", "= \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) #", "cgMLST def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' % (scheme, st, pct)", "list of plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids',", "self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, 
mut): self.append_to('bap/summary/amr_mutations', mut,", "path): '''Stores the root of the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self):", "self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root, this must be set.''' db_root =", "'''Stores a warning on the 'bap' top level (note: use service warning instead).'''", "default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default)", "def add_mlst(self, st, loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s'", "specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self,", "add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self,", "add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut):", "pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True)", "loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' % l, zip(loci,", "the contigs path as its own (pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self,", "that are shared across the BAP services. 
# import os, enum from datetime", "is not set\") elif not os.path.isdir(db_root): raise Exception(\"db root path is not a", "gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes',", "def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list()))", "Contigs and reads def put_fastq_paths(self, paths): '''Stores the fastqs path as its own", "BAP services, so they're not randomly grabbing around # in bags of untyped", "import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class # # Wraps the", "self.get('bap/summary/species', default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if", "version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time =", "its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default)", "service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self,", "for BAP common data def put_db_root(self, path): '''Stores the root of the BAP", "services. 
# import os, enum from datetime import datetime from pico.workflow.blackboard import Blackboard", "self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores a warning on the 'bap'", "def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores list", "ret.extend(self.get_detected_plasmids(list())) return ret if ret else default def add_pmlst(self, profile, st): str =", "def put_graph_path(self, path): '''Stores the path to the GFA file.''' self.put('bap/summary/graph', path) def", "id as the sample id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return", "self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self, paths): '''Stores the fastqs path", "'''Returns dict with fields accession, name, path, length, or the default.''' return self.get('bap/summary/closest',", "end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s'", "% (st, ','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def", "mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self,", "param, value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s'", "closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length of the", "def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', 
datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs)", "(profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self,", "gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True)", "True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st, pct):", "% param, default) def add_warning(self, warning): '''Stores a warning on the 'bap' top", "def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self,", "= datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state)", "of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the path", "(end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param,", "accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path):", "for # data shared between BAP services, so they're not randomly grabbing around", "'''Stores the path to the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return", "dict with fields accession, name, path, length, or 
the default.''' return self.get('bap/summary/closest', default)", "put_closest_reference(self, acc, desc): '''Stores the accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc)", "# Defines the data structures that are shared across the BAP services. #", "db_root: raise Exception(\"database root path is not set\") elif not os.path.isdir(db_root): raise Exception(\"db", "value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning):", "def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def", "True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def", "super().__init__(verbose) # BAP-level methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version)", "put_db_root(self, path): '''Stores the root of the BAP services databases.''' self.put_user_input('db_root', path) def", "def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the path to", "of species specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default)", "add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes):", "get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return", "to the closest reference genome.''' 
self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length", "if not db_root: raise Exception(\"database root path is not set\") elif not os.path.isdir(db_root):", "add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def", "l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids", "Defines the data structures that are shared across the BAP services. # import", "self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno,", "user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path):", "length) def get_closest_reference(self, default=None): '''Returns dict with fields accession, name, path, length, or", "python3 # # kcri.bap.data # # Defines the data structures that are shared", "self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length of the closest reference genome.'''", "user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration',", "instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP common data def put_db_root(self, path):", "ret else default # Reference def put_closest_reference(self, acc, desc): '''Stores the accession and", "str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def 
add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene,", "os, enum from datetime import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class", "pct): str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True) def get_cgmlsts(self):", "default def add_pmlst(self, profile, st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str)", "services, so they're not randomly grabbing around # in bags of untyped data.", "path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores", "def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def", "that adds getters and putters for # data shared between BAP services, so", "Blackboard ### BAPBlackboard class # # Wraps the generic Blackboard with an API", "return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores a warning on the", "contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores", "root path is not set\") elif not os.path.isdir(db_root): raise Exception(\"db root path is", "of the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root, this", "fields accession, name, path, length, or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self,", "def put_sample_id(self, id): '''Store id as the sample id in the summary.''' self.put('bap/summary/sample_id',", "state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param,", "default) def 
add_warning(self, warning): '''Stores a warning on the 'bap' top level (note:", "Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) #", "get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores a", "def get_closest_reference(self, default=None): '''Returns dict with fields accession, name, path, length, or the", "warning on the 'bap' top level (note: use service warning instead).''' self.append_to('bap/warnings', warning)", "param, default) def add_warning(self, warning): '''Stores a warning on the 'bap' top level", "= list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else default def add_pmlst(self, profile,", "return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path to the computed contigs.'''", "by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst):", "get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self,", "(pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self,", "# Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids specified by user.''' self.put_user_input('plasmids',", "start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', 
user_inputs) def", "the computed contigs.''' self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self,", "def get_db_root(self): '''Retrieve the user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root') if", "# Wraps the generic Blackboard with an API that adds getters and putters", "put_fastq_paths(self, paths): '''Stores the fastqs path as its own (pseudo) user input.''' self.put_user_input('fastqs',", "put_closest_reference_length(self, length): '''Stores the length of the closest reference genome.''' self.put('bap/summary/closest/length', length) def", "self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time =", "def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True)", "the BAP services. 
# import os, enum from datetime import datetime from pico.workflow.blackboard", "the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve the user_input/db_root, this must", "path is not set\") elif not os.path.isdir(db_root): raise Exception(\"db root path is not", "[])) # Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids specified by user.'''", "the path to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph',", "def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True)", "the fastqs path as its own (pseudo) user input.''' self.put_user_input('fastqs', paths) def get_fastq_paths(self,", "True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores list", "= '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True) def get_cgmlsts(self): return sorted(self.get('bap/summary/cgmlst',", "str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', []))", "default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else default def", "user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time", "os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store id as the sample id", "warning) # Standard methods for BAP common data def put_db_root(self, path): '''Stores the", "get_user_plasmids(self, default=None): return 
sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self,", "def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance", "self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret", "True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list()", "add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno):", "BAP common data def put_db_root(self, path): '''Stores the root of the BAP services", "def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst',", "plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None):", "def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self, paths): '''Stores", "start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value) def", "# Reference def put_closest_reference(self, acc, desc): '''Stores the accession and description of closest", "os.path.isdir(db_root): raise Exception(\"db root path is not a directory: %s\" % db_root) return", "desc) def put_closest_reference_path(self, path): '''Stores the path to the closest reference 
genome.''' self.put('bap/summary/closest/path',", "generic Blackboard with an API that adds getters and putters for # data", "zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def", "not randomly grabbing around # in bags of untyped data. class BAPBlackboard(Blackboard): '''Adds", "(st, ','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self):", "default # Reference def put_closest_reference(self, acc, desc): '''Stores the accession and description of", "self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self,", "sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', []))", "must be set.''' db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database root path", "get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self, paths): '''Stores the", "default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None):", "self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores list of species specified by", "add_pmlst(self, profile, st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self):", "def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else", "raise Exception(\"db root path is not a directory: %s\" % db_root) return os.path.abspath(db_root)", "and putters 
specific to the shared data definitions in the current BAP.''' def", "self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes',", "'''Stores list of species specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return", "path): '''Stores the path to the closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self,", "current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service, version,", "self.put('bap/summary/contigs', path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the", "# # Wraps the generic Blackboard with an API that adds getters and", "return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return", "# Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes', []))", "the generic Blackboard with an API that adds getters and putters for #", "length): '''Stores the length of the closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self,", "the closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with fields", "untyped data. 
class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and putters specific", "data def put_db_root(self, path): '''Stores the root of the BAP services databases.''' self.put_user_input('db_root',", "'''Stores the path to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return", "put_user_species(self, lst): '''Stores list of species specified by user.''' self.put_user_input('species', lst) def get_user_species(self,", "path to the closest reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the", "reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with fields accession, name,", "return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param,", "sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations', mut, True) def get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', []))", "to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) #", "ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default # Reference", "def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores list of", "input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores", "return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)'", "the length of the closest reference genome.''' 
self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns", "'''Store id as the sample id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self):", "default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default #", "datetime import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class # # Wraps", "else default def add_pmlst(self, profile, st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts',", "def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the contigs path", "and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores", "# import os, enum from datetime import datetime from pico.workflow.blackboard import Blackboard ###", "lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None):", "data structures that are shared across the BAP services. # import os, enum", "path) def put_closest_reference_length(self, length): '''Stores the length of the closest reference genome.''' self.put('bap/summary/closest/length',", "bags of untyped data. 
class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and", "closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with fields accession,", "reference genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length of the closest", "with fields accession, name, path, length, or the default.''' return self.get('bap/summary/closest', default) def", "on the 'bap' top level (note: use service warning instead).''' self.append_to('bap/warnings', warning) #", "l: '%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst',", "get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list()))", "'''Stores the root of the BAP services databases.''' self.put_user_input('db_root', path) def get_db_root(self): '''Retrieve", "description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the", "state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds())", "True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret = list()", "def put_closest_reference_path(self, path): '''Stores the path to the closest reference genome.''' self.put('bap/summary/closest/path', path)", "default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): 
'''Stores the path to the GFA", "by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid):", "warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP common data def put_db_root(self,", "def put_closest_reference(self, acc, desc): '''Stores the accession and description of closest reference.''' self.put('bap/summary/closest/accession',", "self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst,", "in bags of untyped data. class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters", "the shared data definitions in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) #", "default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores list of species", "default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci, alleles): str =", "shared data definitions in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level", "raise Exception(\"database root path is not set\") elif not os.path.isdir(db_root): raise Exception(\"db root", "__init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service)", "putters specific to the shared data definitions in the current BAP.''' def __init__(self,", "summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def", "'unknown') # Contigs and reads def put_fastq_paths(self, paths): '''Stores the fastqs path as", "default) def put_assembled_contigs_path(self, 
path): '''Stores the path to the computed contigs.''' self.put('bap/summary/contigs', path)", "# # kcri.bap.data # # Defines the data structures that are shared across", "def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time", "get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores list of", "path) def get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the path", "self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds'))", "a directory: %s\" % db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id):", "Blackboard getters and putters specific to the shared data definitions in the current", "id): '''Store id as the sample id in the summary.''' self.put('bap/summary/sample_id', id) def", "put_graph_path(self, path): '''Stores the path to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self,", "self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the path to the GFA file.''' self.put('bap/summary/graph',", "ret if ret else default # Reference def put_closest_reference(self, acc, desc): '''Stores the", "ret if ret else default def add_pmlst(self, profile, st): str = \"%s%s\" %", "default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret", "def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' % param, value) def get_user_input(self, 
param, default=None):", "the data structures that are shared across the BAP services. # import os,", "BAPBlackboard class # # Wraps the generic Blackboard with an API that adds", "service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP common data def", "classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes',", "get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST", "def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self, warning): '''Stores", "add_mlst(self, st, loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' %", "plasmid, True) def get_detected_plasmids(self, default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret =", "self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self,", "grabbing around # in bags of untyped data. 
class BAPBlackboard(Blackboard): '''Adds to the", "Standard methods for BAP common data def put_db_root(self, path): '''Stores the root of", "= datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param,", "lst): '''Stores list of plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None):", "top level (note: use service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for", "get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes, True) def get_amr_classes(self): return", "warning): '''Stores a warning on the 'bap' top level (note: use service warning", "[])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def", "lst): '''Stores list of species specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None):", "path to the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default)", "default) # MLST def add_mlst(self, st, loci, alleles): str = \"%s[%s]\" % (st,", "[])) # cgMLST def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' % (scheme,", "and reads def put_fastq_paths(self, paths): '''Stores the fastqs path as its own (pseudo)", "as the sample id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id',", "True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def", 
"return self.get('bap/summary/graph', default) # Species def put_user_species(self, lst): '''Stores list of species specified", "across the BAP services. # import os, enum from datetime import datetime from", "BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and putters specific to the shared", "self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the path to the closest", "db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database root path is not set\")", "randomly grabbing around # in bags of untyped data. class BAPBlackboard(Blackboard): '''Adds to", "return self.get('bap/summary/species', default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret", "self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status', state) def put_user_input(self, param, value): return self.put('bap/user_inputs/%s' %", "db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store id as the", "user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path):", "classes, True) def get_amr_classes(self): return sorted(self.get('bap/summary/amr_classes', [])) def add_amr_phenotype(self, pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True)", "return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the path to the GFA file.'''", "use service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP common data", "this must be set.''' db_root = self.get_user_input('db_root') if not db_root: raise Exception(\"database root", "datetime.now().isoformat(timespec='seconds')) 
self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end',", "not os.path.isdir(db_root): raise Exception(\"db root path is not a directory: %s\" % db_root)", "start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now() self.put('bap/run_info/time/end', end_time.isoformat(timespec='seconds')) self.put('bap/run_info/time/duration', (end_time - start_time).total_seconds()) self.put('bap/run_info/status',", "list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return ret if ret else default def add_pmlst(self, profile, st):", "return sorted(self.get('bap/summary/pmlsts', [])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self):", "ret else default def add_pmlst(self, profile, st): str = \"%s%s\" % (profile, st)", "def put_user_species(self, lst): '''Stores list of species specified by user.''' self.put_user_input('species', lst) def", "st): str = \"%s%s\" % (profile, st) self.append_to('bap/summary/pmlsts', str) def get_pmlsts(self): return sorted(self.get('bap/summary/pmlsts',", "class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and putters specific to the", "ret.extend(self.get_detected_species(list())) return ret if ret else default # Reference def put_closest_reference(self, acc, desc):", "scheme, st, pct): str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str, True)", "self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state): start_time = datetime.fromisoformat(self.get('bap/run_info/time/start')) end_time = datetime.now()", "own (pseudo) user input.''' self.put_user_input('fastqs', paths) def 
get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def", "Exception(\"db root path is not a directory: %s\" % db_root) return os.path.abspath(db_root) #", "str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores", "'bap' top level (note: use service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods", "%s\" % db_root) return os.path.abspath(db_root) # Sample ID def put_sample_id(self, id): '''Store id", "add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self,", "default=None): return sorted(self.get('bap/summary/plasmids', default)) def get_plasmids(self, default=None): ret = list() ret.extend(self.get_user_plasmids(list())) ret.extend(self.get_detected_plasmids(list())) return", "if ret else default def add_pmlst(self, profile, st): str = \"%s%s\" % (profile,", "data shared between BAP services, so they're not randomly grabbing around # in", "between BAP services, so they're not randomly grabbing around # in bags of", "# data shared between BAP services, so they're not randomly grabbing around #", "get_user_contigs_path(self, default=None): return self.get_user_input('contigs', default) def put_assembled_contigs_path(self, path): '''Stores the path to the", "acc, desc): '''Stores the accession and description of closest reference.''' self.put('bap/summary/closest/accession', acc) self.put('bap/summary/closest/name',", "st, loci, alleles): str = \"%s[%s]\" % (st, ','.join(map(lambda l: '%s:%s' % l,", "specific to the shared data definitions in the current BAP.''' def __init__(self, verbose=False):", "verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version',", 
"# cgMLST def add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' % (scheme, st,", "level (note: use service warning instead).''' self.append_to('bap/warnings', warning) # Standard methods for BAP", "param, value) def get_user_input(self, param, default=None): return self.get('bap/user_inputs/%s' % param, default) def add_warning(self,", "genome.''' self.put('bap/summary/closest/path', path) def put_closest_reference_length(self, length): '''Stores the length of the closest reference", "name, path, length, or the default.''' return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return", "of plasmids specified by user.''' self.put_user_input('plasmids', lst) def get_user_plasmids(self, default=None): return sorted(self.get_user_input('plasmids', default))", "methods for BAP common data def put_db_root(self, path): '''Stores the root of the", "root path is not a directory: %s\" % db_root) return os.path.abspath(db_root) # Sample", "pheno): self.append_to('bap/summary/amr_phenotypes', pheno, True) def get_amr_phenotypes(self): return sorted(self.get('bap/summary/amr_phenotypes', [])) def add_amr_mutation(self, mut): self.append_to('bap/summary/amr_mutations',", "'''Stores the contigs path as its own (pseudo) user input.''' self.put_user_input('contigs', path) def", "to the shared data definitions in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose)", "are shared across the BAP services. 
# import os, enum from datetime import", "default) # Species def put_user_species(self, lst): '''Stores list of species specified by user.'''", "API that adds getters and putters for # data shared between BAP services,", "get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) ret.extend(self.get_detected_species(list())) return ret if ret else default", "get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self, st, loci, alleles): str", "add_cgmlst(self, scheme, st, pct): str = '%s:%s(%s%%)' % (scheme, st, pct) self.append_to('bap/summary/cgmlst', str,", "around # in bags of untyped data. class BAPBlackboard(Blackboard): '''Adds to the generic", "GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species def", "Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def", "contigs path as its own (pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None):", "Sample ID def put_sample_id(self, id): '''Store id as the sample id in the", "return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return self.get_closest_reference({}).get('length', default) # MLST def add_mlst(self,", "def put_assembled_contigs_path(self, path): '''Stores the path to the computed contigs.''' self.put('bap/summary/contigs', path) def", "return self.get('bap/summary/sample_id', 'unknown') # Contigs and reads def put_fastq_paths(self, paths): '''Stores the fastqs", "get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list())) 
ret.extend(self.get_detected_species(list()))", "specified by user.''' self.put_user_input('species', lst) def get_user_species(self, default=None): return self.get_user_input('species', default) def add_detected_species(self,", "# Sample ID def put_sample_id(self, id): '''Store id as the sample id in", "return sorted(self.get('bap/summary/virulence_genes', [])) # Resistance def add_amr_gene(self, gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self):", "put_closest_reference_path(self, path): '''Stores the path to the closest reference genome.''' self.put('bap/summary/closest/path', path) def", "the generic Blackboard getters and putters specific to the shared data definitions in", "return self.get('bap/summary/closest', default) def get_closest_reference_path(self, default=None): return self.get_closest_reference({}).get('path', default) def get_closest_reference_length(self, default=None): return", "def put_closest_reference_length(self, length): '''Stores the length of the closest reference genome.''' self.put('bap/summary/closest/length', length)", "self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes', classes,", "get_closest_reference(self, default=None): '''Returns dict with fields accession, name, path, length, or the default.'''", "they're not randomly grabbing around # in bags of untyped data. class BAPBlackboard(Blackboard):", "BAP services. # import os, enum from datetime import datetime from pico.workflow.blackboard import", "from pico.workflow.blackboard import Blackboard ### BAPBlackboard class # # Wraps the generic Blackboard", "get_amr_mutations(self): return sorted(self.get('bap/summary/amr_mutations', [])) # cgMLST def add_cgmlst(self, scheme, st, pct): str =", "data. 
class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard getters and putters specific to", "the GFA file.''' self.put('bap/summary/graph', path) def get_graph_path(self, default=None): return self.get('bap/summary/graph', default) # Species", "the sample id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown')", "sample id in the summary.''' self.put('bap/summary/sample_id', id) def get_sample_id(self): return self.get('bap/summary/sample_id', 'unknown') #", "'''Retrieve the user_input/db_root, this must be set.''' db_root = self.get_user_input('db_root') if not db_root:", "[])) # Virulence def add_detected_virulence_gene(self, gene): self.append_to('bap/summary/virulence_genes', gene, True) def get_virulence_genes(self): return sorted(self.get('bap/summary/virulence_genes',", "gene): self.append_to('bap/summary/amr_genes', gene, True) def get_amr_genes(self): return sorted(self.get('bap/summary/amr_genes', [])) def add_amr_classes(self, classes): self.append_to('bap/summary/amr_classes',", "self.get_user_input('species', default) def add_detected_species(self, lst): self.append_to('bap/summary/species', lst, True) def get_detected_species(self, default=None): return self.get('bap/summary/species',", "self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with fields accession, name, path, length,", "self.put_user_input('fastqs', paths) def get_fastq_paths(self, default=None): return self.get_user_input('fastqs', default) def put_user_contigs_path(self, path): '''Stores the", "<reponame>zwets/kcri-cge-bap #!/usr/bin/env python3 # # kcri.bap.data # # Defines the data structures that", "datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard class # # Wraps the generic", "def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self, service, version, user_inputs): 
self.put('bap/run_info/service',", "of the closest reference genome.''' self.put('bap/summary/closest/length', length) def get_closest_reference(self, default=None): '''Returns dict with", "import os, enum from datetime import datetime from pico.workflow.blackboard import Blackboard ### BAPBlackboard", "alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self,", "get_assembled_contigs_path(self, default=None): return self.get('bap/summary/contigs', default) def put_graph_path(self, path): '''Stores the path to the", "else default # Reference def put_closest_reference(self, acc, desc): '''Stores the accession and description", "path as its own (pseudo) user input.''' self.put_user_input('contigs', path) def get_user_contigs_path(self, default=None): return", "def get_detected_species(self, default=None): return self.get('bap/summary/species', default) def get_species(self, default=None): ret = list() ret.extend(self.get_user_species(list()))", "default=None): return sorted(self.get_user_input('plasmids', default)) def add_detected_plasmid(self, plasmid): self.append_to('bap/summary/plasmids', plasmid, True) def get_detected_plasmids(self, default=None):", "return sorted(self.get('bap/summary/mlst', [])) # Plasmids def put_user_plasmids(self, lst): '''Stores list of plasmids specified", "','.join(map(lambda l: '%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return", "version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds')) self.put('bap/user_inputs', user_inputs) def end_run(self, state):", "generic Blackboard getters and putters specific to the shared data definitions in the", "self.put('bap/summary/closest/name', desc) def put_closest_reference_path(self, path): '''Stores the 
path to the closest reference genome.'''", "kcri.bap.data # # Defines the data structures that are shared across the BAP", "'%s:%s' % l, zip(loci, alleles)))) self.append_to('bap/summary/mlst', str, True) def get_mlsts(self): return sorted(self.get('bap/summary/mlst', []))", "# in bags of untyped data. class BAPBlackboard(Blackboard): '''Adds to the generic Blackboard", "put_user_contigs_path(self, path): '''Stores the contigs path as its own (pseudo) user input.''' self.put_user_input('contigs',", "in the current BAP.''' def __init__(self, verbose=False): super().__init__(verbose) # BAP-level methods def start_run(self,", "structures that are shared across the BAP services. # import os, enum from", "BAP-level methods def start_run(self, service, version, user_inputs): self.put('bap/run_info/service', service) self.put('bap/run_info/version', version) self.put('bap/run_info/time/start', datetime.now().isoformat(timespec='seconds'))", "to the generic Blackboard getters and putters specific to the shared data definitions" ]
[ "\"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\")", "\" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part IoU", "cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data", "+ '\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] *", "sampled faces for face in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] =", "== prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index,", "os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param ground: N x", "print(best_model_fn[i]) buf += \"Best model iou: \" + str(best_iou_model[i]) + \", \" +", "31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool:", "2)) + '\\n' \\ \"Mesh Part IoU From Triangles - FR: \" +", "face_labels[np.where(components == int(comp))[0]] = label return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels,", "\"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) +", "mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels,", "= 
np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels)", "face_area is not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T,", "return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for evaluation :param", "\"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2))", "NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR =", "Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\", "get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) #", "= get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels,", "read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR", "= [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx],", "# Calculate avg point part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou =", "in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n'", "\"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR =", "+ \"_label.json\"), 'r') as fin_json: 
labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis]", "+ str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From", "for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0", "Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point", "+ \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2))", "needed for evaluation :param model_name: str :return: vertices: N x 3, numpy.ndarray(float) faces:", "sampled) # faces with no sample points face_centers = compute_face_centers(faces, unsampled, vertices) #", "N x 1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\" # Convert numpy", "1] = s_iou best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr", "label in toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR, \"results_log.txt\"),", "BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR))", "assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\"", "as fin_txt: point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:,", "'\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 100,", "\"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = 
os.path.join(BUILDNET_BASE_DIR,", "== point_gt_labels.shape[0]) # Get per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn", "IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr)", "face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels,", "IoU - FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\", "* 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp: \" +", "(os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\")", "\" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "= np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1", "face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True) # Calculate point", "\"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR))", "\"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) +", "(label 0) during training face_labels_from_comp_avg_pool = 
np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool:", "\"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr", "from comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0]", "faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face area faces -=", "[label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values()", "len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool =", "in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) /", "face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if", "mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool", "-= 1 # restore to original values # Calculate avg point part and", "len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i])", "face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and return model", "avg. feat. 
, that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use", "points: N x 3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N x", "(len(toplabels) - 1)) # Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] +", "!= \"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n'", "point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn]", "numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M", "comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] *", "for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\"", "feat. , that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg", "and mesh tracks\") for model_fn in tqdm(models_fn): # Get point cloud data points,", "tracks\") for model_fn in tqdm(models_fn): # Get point cloud data points, point_gt_labels, point_pred_labels,", "\\ \"Point Part IoU: \" + str( np.round(point_part_iou['all'] * 100, 2)) + '\\n'", "+= \"Point Classification Accuracy: \" + str(np.round(point_acc * 100, 2)) + '\\n' \\", "point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels,", "face_point_index = {} # Find faces that have no corresponding points sampled =", "to triangles # Find nearest point and assign its point feature to each", "1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components = 
len(np.unique(components)) face_feat_from_tr_avg_pool =", "np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during training", "point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N x", "p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels,", "fin_txt: point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis]", "+ str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From", "= os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground,", "in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Per label mesh part", "assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR,", "in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0 if s_iou", "Calculate avg point part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou)", "point predictions to triangles and components through avg pooling :param vertices: N x", "_ in range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp,", "truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json)", "mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = 
{}, {}, {}, {}, {} print(\"Calculate part and shape", "\" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if label !=", ":return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M", "for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices, faces,", "from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\",", "corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces with", "+ 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool =", "get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn]", "face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn]", "\"Point Classification Accuracy: \" + str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point", "= get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\", "Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\", "in range(top_k)], \\ [[] for _ in range(top_k)] # Get model names models_fn", "classification accuracy 
point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels,", "+ \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp,", "get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)", "from tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers", "= np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool", "= np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool =", "in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx in", "models_fn: list(str) \"\"\" models_fn = [] with open(split_fn, 'r') as fin: for line", "for idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model -=", "face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, 
model_fn", "= sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR,", "is not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground", "= set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points", "Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n'", "Transfer point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components ==", "Part IoU - FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n'", "range(top_k)], \\ [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)]", "\" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "we exclude undetermined (label 0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] +", "data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from", "= os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR =", "+ '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr *", "x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float)", "assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\")", "N x 3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param components: M", "face_feat_from_comp_avg_pool: M x 31, 
numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x", "= calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components to labels with", "labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\", "point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc,", "= get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp =", "np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in", "comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label", "with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k =", "= np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _", "open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components)", "'\\n' \\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] *", "assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred", "Comp: \" + str( 
np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area)", "0) during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points face index with", "# Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate", "\\ \"Per label point part IoU: \" + \", \".join([label + \": \"", "os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR,", "points, point_feat, point_face_index, max_pool=True) # Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels)", ":param model_name: str :return: points: N x 3, numpy.ndarray(float) point_gt_labels: N x 1,", "\"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR,", ":param model_name: str :return: vertices: N x 3, numpy.ndarray(float) faces: M x 3,", "point_gt_labels.shape[0]) # Get per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn +", "Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n'", "Save labels in json format :param labels: N x 1, numpy.ndarray(int) :param fn_json:", "each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, 
point_feat, points, unsampled) if max_pool: # unsampled", "no corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces", "\".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0])", "= np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc", "N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction) ground =", "best buf = '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best", "Transfer point predictions to triangles and components through avg pooling :param vertices: N", "100, 2)) + '\\n' \\ \"Point Part IoU: \" + str( np.round(point_part_iou['all'] *", "mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use", "part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr)", ":param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1,", "if max_pool: # unsampled faces have only one point, so max == avg.", "mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) #", "tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import", "x 1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\" # Convert numpy to", "\"\"\" 
n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool", "= accuracy[0] else: accuracy = np.sum(ground == prediction) / float(len(ground)) return accuracy def", "x 3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param points: K x", "open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k = 200", "\"\"\" Classification accuracy :param ground: N x 1, numpy.ndarray(int) :param prediction: N x", "s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle", "model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and", "iou: \" + str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR,", "# Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building", "assert (point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred label point_pred_labels = np.argmax(point_feat,", "+ '\\n' \\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part']", "Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n'", "mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {} print(\"Calculate part", "numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction =", "vertices: N x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M x", "\"Best model iou: \" + str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n'", 
"np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0)", "components, points, point_feat, point_face_index, max_pool=True) # Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels,", "restore to original values # Calculate avg point part and shape IOU point_shape_iou", "point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] =", "face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn):", "classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR,", "x 31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get points points,", "100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp- FR: \" +", "nearest point and assign its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool,", "\"Per label mesh part IoU from triangles: \" + \", \".join( [label +", "len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1))", "comp_idx).nonzero()[0] point_idx = [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx]", "to original values # Calculate avg point part and shape IOU point_shape_iou =", "assert (point_gt_labels.shape == point_pred_labels.shape) # Get 
points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name +", "compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to triangles # Find nearest point", "\"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2))", "BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh features", "face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area)", "+ str(np.round( point_part_iou[ label] * 100, 2)) for label in toplabels.values() if label", "# BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR))", "mask.nonzero()[0].tolist() # Transfer point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx =", "float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for i in range(top_k): print(best_iou_model[i]);", "numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx =", "Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"),", "1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components to", "model_name: str :return: vertices: N x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int)", "compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep,", 
"\\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2))", "= get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()]) /", "in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \" + str(best_iou_model[i]) +", "np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool", "np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that have no corresponding points sampled", "\\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2))", "FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per label", "str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp:", "= face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model", "ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json =", "face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx =", "\"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get", "Classification Accuracy: \" + str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point Shape", "\" + str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x", 
"\"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None):", "+ 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool,", "face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} #", "Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) +", "+ 1 # we exclude undetermined (label 0) during training assert (point_gt_labels.shape ==", "non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area", "shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr =", "Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "if __name__ == \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9", "labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels", "os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert 
(os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR", "points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index =", "\", \".join([label + \": \" + str(np.round( point_part_iou[ label] * 100, 2)) for", "* 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \" +", "== \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred,", "\" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in toplabels.values() if", "\\ \"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] *", "save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i]", "+ str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float(", "Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n'", "assign its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points,", "handle cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou", "sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: #", "str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per label point part IoU:", "np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles -", 
"best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1]", "2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all']", "model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:,", "face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling also face_feat_from_tr_max_pool[face]", "feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool:", "Use avg pooling for sampled faces for face in sampled: mask = np.squeeze(point_face_index", "point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels, components, face_area =", "= compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to triangles # Find nearest", "{}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {},", "numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float)", "1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool:", "features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat =", "/ float(len(label_iou)) + 1 # handle cases where iou=0 if s_iou > best_iou_model[-1]:", "= os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\")", "\"best_comp\") 
os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR,", "predictions to triangles and components through avg pooling :param vertices: N x 3,", "[label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values()", "best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)],", "best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1", "\\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2))", "None \"\"\" # Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export", "toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR, \"results_log.txt\"), 'w') as", "faces: M x 3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param points:", "mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR,", "face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x", "- FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh", "{}, {} print(\"Calculate part and shape IOU for point and mesh tracks\") for", "= np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf", "model_name: str :return: points: N x 3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int)", "# unsampled faces have only one point, 
so max == avg. feat. ,", "\"\"\" Get point cloud data needed for evaluation :param model_name: str :return: points:", "fn_json): \"\"\" Save labels in json format :param labels: N x 1, numpy.ndarray(int)", "os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert", "exist_ok=True) # Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR,", "original values # Calculate avg point part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou)", "= os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1] assert", "if label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR, \"results_log.txt\"), 'w') as fout_txt:", "best_model_fn = [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)],", "\" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU", "str :return: points: N x 3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels:", "np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in", "Get point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get", "\".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in", "in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n'", "= 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn =", "(point_feat.shape[0] == 
point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred label", "for model_fn in tqdm(models_fn): # Get point cloud data points, point_gt_labels, point_pred_labels, point_feat,", "\\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and return model names :param", "= np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during", "results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True)", "x 3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int)", "vertices) # Transfer point predictions to triangles # Find nearest point and assign", "31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get points points, _", "0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool", "label return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels", "pooling :param vertices: N x 3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int)", "faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read", "K x 3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K", "BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert", "classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy 
:param ground: N x 1, numpy.ndarray(int) :param", "+ '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \"", "N x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x 31,", "face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v", "\", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label", "from triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] *", "set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points face_centers", "os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR,", "features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True)", "as fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] ==", "mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc", "N x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N x 1,", "\" + str(np.round( point_part_iou[ label] * 100, 2)) for label in toplabels.values() if", "x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int)", "\".txt\"), 'r') as fin_txt: point_face_index = 
fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in", "str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles:", "# Get point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) #", "sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1 # restore", "x 1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name", "json format :param labels: N x 1, numpy.ndarray(int) :param fn_json: str :return: None", "point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for", "face_point_index, point_feat, points, unsampled) if max_pool: # unsampled faces have only one point,", "max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point", "exclude undetermined (label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points", "best_iou_model -= 1 # restore to original values # Calculate avg point part", "idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] =", "# Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json:", "'r') as fin: for line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\"", "np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling", "label != \"undetermined\"]) + '\\n' \\ \"Per label mesh part IoU from comp:", "dict(zip(np.arange(labels.shape[0]).astype(str), 
np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json)", "best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1 # restore to", "+ str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] +", "axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool,", "'\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr * 100,", "x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int)", "np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool", "faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as", "os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR))", "= np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in", "== point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh", "Part IoU: \" + str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point", "= comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude undetermined", "= \\ transfer_point_predictions(vertices, faces, components, points, point_feat, 
point_face_index, max_pool=True) # Calculate point building", "# restore to original values # Calculate avg point part and shape IOU", "get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc))", "axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return", "calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR,", "* 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \" +", "BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR)", "From Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh", "evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * #", "2)) + '\\n' \\ \"Mesh Part IoU From Comp- FR: \" + str(", "import os import json import sys from tqdm import tqdm from evaluation.mesh_utils import", "assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR", "for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in", "labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices, faces, face_labels, 
components, face_area def", "to triangles and components through avg pooling :param vertices: N x 3, numpy.ndarray(float)", "# Save best buf = '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf", "\"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert", "with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels =", "+ \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf += \"Point", "\\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32))", "point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR,", "for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in", "face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces for face in", "point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if", "2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Per", "BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR", "axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to components for comp_idx in", "get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = 
get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool,", "+ '\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp *", "2)) + '\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all']", "import json import sys from tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply,", "1, numpy.ndarray(int) \"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\"))", "as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k = 200 best_iou_model =", "os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\")", "ground: N x 1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param face_area:", "= ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not None: face_area = np.copy(face_area)", "np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool)", "Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json", "= np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)])", "\"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool", "\"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) +", "label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label", "+ 
'\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \"", "point_pred_labels.shape) # Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as", "list(str) \"\"\" models_fn = [] with open(split_fn, 'r') as fin: for line in", "np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \"", "'\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR,", "building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn]", "# Transfer point predictions to triangles # Find nearest point and assign its", "= np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool:", "\"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\")", "max == avg. feat. 
, that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool)", "x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool =", "model_name + \".obj\")) # Calculate face area faces -= 1 face_area = calculate_face_area(vertices=vertices,", "\": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if label", "also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to", "max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {}", "save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf += \"Point Classification Accuracy:", "undetermined (label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points face", "so max == avg. feat. 
, that of the nearest point face_feat_from_tr_max_pool =", "str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp:", "the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces", "(BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\")", "accuracy: float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground !=", "(BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\")", "ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not None: face_area = np.copy(face_area) face_area", "mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr,", "= len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components,", "= \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat", "1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param point_feat: K x 31,", "+ '\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] *", "-= 1 face_area = 
calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components", "len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1]))", "'\\n' \\ \"Point Part IoU - FR: \" + str( np.round(point_part_iou['fr-part'] * 100,", "exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification", "os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results", "\"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param ground: N", "face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] +", "np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \"", "face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool)", "\"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR =", "components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # 
Calculate face area faces -= 1", "fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0])", "# Find faces that have no corresponding points sampled = set(point_face_index.flatten()) unsampled =", "directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR =", "label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR, \"results_log.txt\"), 'w') as fout_txt: fout_txt.write(buf)", "model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for", "Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\", "points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth labels with", "= os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT", "# Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool,", "Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per", "100, 2)) + '\\n' \\ \"Per label mesh part IoU from triangles: \"", ", that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling", "point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx],", "'\\n' \\ \"Mesh 
Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100,", "np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to components for comp_idx", "str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp:", "\"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory", "if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:,", "assert (face_area.shape[0] == faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name +", "Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n'", "range(top_k)], \\ [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)],", "\": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if label", "M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31,", "save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results", "points face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to triangles #", "= point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k", "mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {} print(\"Calculate part 
and", "1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist()", "M x 3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M x 1,", "mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32))", "prediction[non_zero_idx] if face_area is not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy", "pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions", "mesh data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels", "face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points,", "# Get mesh data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer", "(os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert", "str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part IoU", "point predictions to triangles # Find nearest point and assign its point feature", "= os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR)", "* 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \" +", "if label != 
\"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\"", "BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR", "\"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR,", "prediction = prediction[non_zero_idx] if face_area is not None: face_area = np.copy(face_area) face_area =", "face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat,", "M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1,", "with no sample points face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer point predictions", "x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float)", "IoU: \" + \", \".join([label + \": \" + str(np.round( point_part_iou[ label] *", "max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis]", "\"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100,", "in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) /", "\".obj\")) # Calculate face area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) 
assert", "- sampled) # faces with no sample points face_centers = compute_face_centers(faces, unsampled, vertices)", "mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh", "range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx in face_idx:", "labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels", "N x 31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get points", "Get per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except", "for evaluation :param model_name: str :return: vertices: N x 3, numpy.ndarray(float) faces: M", "- FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh", "!= \"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n'", "idx in sort_idx] best_iou_model -= 1 # restore to original values # Calculate", "\" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU", "\"\"\" Get mesh data needed for evaluation :param model_name: str :return: vertices: N", "np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \"", "== face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling also", "point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] ==", "'\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100,", "+ '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str( 
np.round(mesh_acc_from_tr_max_pool *", "{}, {}, {}, {} print(\"Calculate part and shape IOU for point and mesh", "pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined", "np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point features (probabilities)", "1)) # Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 #", "= [best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1 # restore to original", "point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr))", "training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool,", "# we exclude undetermined (label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape) #", "Save best buf = '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf +=", "np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp: \"", "pooling for sampled faces for face in sampled: mask = np.squeeze(point_face_index == face)", "point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels,", "assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert", "evaluation :param model_name: str :return: points: N x 3, numpy.ndarray(float) point_gt_labels: N x", "mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = 
np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float(", "point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get", "face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to components", "\"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR,", "100, 2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \" + str(", "\"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\", "= np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool,", "try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool:", "From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh", "for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR,", "model_fn + \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0]", "np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp: \"", "+ '\\n' \\ \"Per label mesh part IoU from triangles: \" + \",", "M x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M x 1,", "point_face_index, max_pool=True) # 
Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate", "aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\")", "\" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2))", "face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr)", "+= \"Best model iou: \" + str(best_iou_model[i]) + \", \" + best_model_fn[i] +", "= np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces for face in sampled:", "read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name +", "point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred label point_pred_labels =", "= get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area)", "Find faces that have no corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces)))", "= classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool)", "\"\"\" # Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json", "100, 
2)) + '\\n' \\ \"Mesh Part IoU From Triangles - FR: \"", "\".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label", "\"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR,", "numpy.ndarray(int) :param fn_json: str :return: None \"\"\" # Convert numpy to dict labels_json", "+ 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool,", "100, 2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf)", "FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per label", "and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()])", "= '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou:", "components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json =", "BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR,", "best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \\ [[] for _ in", "assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\",", "face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = 
np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if", "mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\", "prediction, face_area=None): \"\"\" Classification accuracy :param ground: N x 1, numpy.ndarray(int) :param prediction:", "best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for", "x 3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float)", "= np.sum(ground == prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points,", "\\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\", "2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all']", "IoU from triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0]", "IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\", "+ \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for label in", "= best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for", "mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float(", "numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) 
face_feat_from_comp_avg_pool: M", "np.sum(ground == prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat,", "+ \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2))", "\"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From", "faces that have no corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) -", "'r') as fin_txt: point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index],", "not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground ==", "in tqdm(models_fn): # Get point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index =", "# Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True)", "os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR =", "\\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn]", "= dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w') as fout_json: json.dump(labels_json,", "x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float)", "def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param ground: N x 1, numpy.ndarray(int)", "\" + str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point Shape 
IoU: \"", "have no corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) #", "+ \".obj\")) # Calculate face area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces)", "From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n'", "accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy", "faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions", "From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh", "100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \" + str(", "3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area:", "(label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points face index", "for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels,", "best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k -", "+ str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\", "numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int)", "unsampled faces have only one point, so max == avg. feat. 
, that", "'\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" +", "np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground == prediction) / float(len(ground)) return", "best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \\ [[] for", "= get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn)", "= np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return", "1] = point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp", "\", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label", "predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx", "\\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True) # Calculate point building iou", "in sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1 #", "+ '\\n' \\ \"Point Part IoU: \" + str( np.round(point_part_iou['all'] * 100, 2))", "for _ in range(top_k)], \\ [[] for _ in range(top_k)], \\ [[] for", "face_labels = np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label", "get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels,", "format :param labels: N x 1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\"", "def 
get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for evaluation :param model_name: str", "1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \\ [[]", "IoU from triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0]", "face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:,", "+ '\\n' \\ \"Per label point part IoU: \" + \", \".join([label +", "mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool", "\" + str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU", "return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and", "in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices, faces, face_labels, components, face_area", "IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) +", "avg point part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr", "np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp- FR:", "# Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json:", "faces with no sample points face_centers = compute_face_centers(faces, 
unsampled, vertices) # Transfer point", "components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles and components", "\"Point Part IoU: \" + str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\", "= np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy =", "mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc", "face_labels: M x 1, numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M x", "{}, {}, {}, {}, {} print(\"Calculate part and shape IOU for point and", "* 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \" +", "+ '\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] *", "x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float)", "worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()]) /", "face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and return model names :param split_fn:", "100, 2)) + '\\n' \\ \"Per label point part IoU: \" + \",", "\\ \"Per label mesh part IoU from triangles: \" + \", \".join( [label", "\"\"\" Transfer point predictions to triangles and components through avg pooling :param vertices:", "\\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate 
classification", "N x 1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return: accuracy: float", "Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh Shape", "for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = ''", "point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces for face", "def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format :param labels: N x", "ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground ==", "1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction", "comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool", "best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx", "{}, {}, {} print(\"Calculate part and shape IOU for point and mesh tracks\")", "(point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get", "* 100, 2)) + '\\n' \\ \"Point Part IoU - FR: \" +", "+ \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) #", "'\\n' \\ \"Per label mesh part IoU from comp: \" + \", \".join([label", "ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction =", "= comp_feat_avg_pool[comp_idx] if max_pool: 
comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool =", "mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area)", "if label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\"", "dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def", "Calculate face area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] ==", "point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation :param", "N x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M x 1,", "\" + str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i]", "as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in labels_json.items():", "point and mesh tracks\") for model_fn in tqdm(models_fn): # Get point cloud data", "best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf +=", "\".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in", "2)) + '\\n' \\ \"Point Part IoU: \" + str( np.round(point_part_iou['all'] * 100,", "accuracy :param ground: N x 1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int)", "acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in 
mesh_buildings_acc_from_tr.values()]) /", "point_part_iou[ label] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"])", "+ str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) +", "ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not None: face_area =", "point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels, components, face_area", "IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\", "prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground =", "+ str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"])", "except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] =", "FACE_FEAT_FROM_TR_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def", "point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices, faces, face_gt_labels, components,", "# Export json file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__", "face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format :param", "np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] +", "Comp: \" + str( 
np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "!= \"undetermined\"]) + '\\n' \\ \"Per label mesh part IoU from comp: \"", "M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1,", "point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces,", "point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape)", "np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name):", "if face_area is not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy =", "that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for", "# Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn +", "from triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] *", "face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x", "- 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1)) #", "\"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) +", "index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines() point_face_index", "+ 1 # we exclude undetermined (label 0) during training 
face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool,", "+ '\\n' \\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part']", "save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format :param labels: N x 1,", "FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification", "3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param points: K x 3,", "{} # Find faces that have no corresponding points sampled = set(point_face_index.flatten()) unsampled", "# Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt:", "os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories", "results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results", "2)) + '\\n' \\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100,", "unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample points face_centers =", "# Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we", "len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool =", "= np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to components for", "face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] /", "def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation :param model_name: str :return:", "== point_gt_labels.shape[0]) assert 
(point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred label point_pred_labels", "# Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file", "np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \"", "n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool =", "# Use avg pooling for sampled faces for face in sampled: mask =", "sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\")", "point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {} print(\"Calculate", "v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0 if", "/ float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool))", "np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces for face in sampled: mask", "str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles", "str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in toplabels.values() if label !=", "31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\"", "in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat,", "x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M x 1, 
numpy.ndarray(int)", "= get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp =", "mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool", "= \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels)", "Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per", "\"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\",", "M x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" # Load obj", "= {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool =", "point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation", "Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \" +", "point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data", "building iou 
point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] =", "comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool,", "M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31,", "\", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for", "(point_gt_labels.shape == point_pred_labels.shape) # Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"),", "3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components:", "\"\"\" models_fn = [] with open(split_fn, 'r') as fin: for line in fin:", "From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh", "point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) -", "= \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate", "max_pool=True) # Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh", "values # Calculate avg point part and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou", "faces for face in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask],", "M x 1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param point_feat: 
K", "for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()])", "\\ [[] for _ in range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT)", "= list(set(np.arange(len(faces))) - sampled) # faces with no sample points face_centers = compute_face_centers(faces,", "1 # restore to original values # Calculate avg point part and shape", "IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n'", "= prediction[non_zero_idx] if face_area is not None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx]", "mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for", ":param fn_json: str :return: None \"\"\" # Convert numpy to dict labels_json =", "range(top_k)], \\ [[] for _ in range(top_k)] # Get model names models_fn =", "get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation :param model_name: str :return: vertices:", "Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n'", "face_area: M x 1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components =", "during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool =", "np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per label point part IoU: \"", "\"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for", 
"get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in", "label mesh part IoU from triangles: \" + \", \".join( [label + \":", "model_name + \".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"),", "get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc", "1 # we exclude undetermined (label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape)", "\"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR,", "to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json)", "\\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2))", "np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if", "+ str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR =", "/ float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp", "N x 3, numpy.ndarray(float) point_gt_labels: N x 1, 
numpy.ndarray(int) point_pred_labels: N x 1,", "np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices,", "\": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in toplabels.values()", "* 100, 2)) + '\\n' \\ \"Per label mesh part IoU from triangles:", "!= 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not None:", "results buf += \"Point Classification Accuracy: \" + str(np.round(point_acc * 100, 2)) +", "= np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find", "part IoU: \" + \", \".join([label + \": \" + str(np.round( point_part_iou[ label]", "exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh", "point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0)", "np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) -", "label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Per label mesh", "avg pooling for sampled faces for face in sampled: mask = np.squeeze(point_face_index ==", "in range(top_k)], \\ [[] for _ in range(top_k)], \\ [[] for _ in", "Save best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v", "= mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1", "\"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From", "= s_iou best_model_points_pred[top_k - 1] = point_pred_labels 
best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k", "3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param components: M x 1,", "from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices,", ":param split_fn: :return: models_fn: list(str) \"\"\" models_fn = [] with open(split_fn, 'r') as", "= get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc =", "Get point cloud data needed for evaluation :param model_name: str :return: points: N", "2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf) with", "point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx]", "(os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories", "for face in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0)", "best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx in", "= np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx]", "axis=0) if max_pool: # Use max pooling also 
face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face]", "= os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param", "Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n'", "100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp: \" + str(", "for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx]", "'' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \"", "= os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR", ":param vertices: N x 3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param", "read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face area faces -= 1 face_area =", "\" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool,", "os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR", ":param 
prediction: N x 1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return:", "N x 1, numpy.ndarray(int) \"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name", "numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\"))", "IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\", "IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n'", "axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we", "point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index", "= np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1", "components: M x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" # Load", "point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool)", "for sampled faces for face in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face]", "face_point_index[face] = mask.nonzero()[0].tolist() # Transfer point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]):", "M x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) 
face_feat_from_comp_avg_pool", "get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy", "{}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {},", "transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles", "M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components))", "+ str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point Shape IoU: \" +", "point cloud data needed for evaluation :param model_name: str :return: points: N x", "Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "point_feat, point_face_index, max_pool=True) # Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) #", "+ '\\n' \\ \"Mesh Part IoU From Triangles - FR: \" + str(", "float(len(point_buildings_acc)) mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp =", "json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] =", "data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save", "point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles and components through avg", "\" + \", \".join([label + \": \" + 
str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100,", "2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Max", "cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k", "model_name + \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for", "for point and mesh tracks\") for model_fn in tqdm(models_fn): # Get point cloud", "fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for", "face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index,", "Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth", "accuracy[0] else: accuracy = np.sum(ground == prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices,", "get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {}", "point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool =", "iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] =", "From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh", "None: face_area = np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = 
np.dot(face_area.T, ground == prediction)[0]", "= read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face area faces -= 1 face_area", "face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save", "Find nearest point and assign its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers,", "= {} # Find faces that have no corresponding points sampled = set(point_face_index.flatten())", "str :return: None \"\"\" # Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist()))", "toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n' \\", "sys from tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\", "face_area: N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction) ground", "predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components,", "# Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels,", "that have no corresponding points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled)", "Classification accuracy :param ground: N x 1, numpy.ndarray(int) :param prediction: N x 1,", "Get mesh data needed for evaluation :param model_name: str :return: vertices: N x", "classification_accuracy(face_gt_labels, 
face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR,", "= np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx]", "# we exclude undetermined (label 0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis]", "From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\", "\\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100,", "assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\",", "points sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no", "numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" #", "Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face", "point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param max_pool:", "mesh part IoU from comp: \" + \", \".join([label + \": \" +", "models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for evaluation", "vertices: N x 3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param components:", "in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc 
for acc in mesh_buildings_acc_from_comp_max_pool.values()]) /", "range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\", "face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool: # unsampled faces have only one", "file and return model names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn =", "Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude", "\\ \"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy", "face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool", "+ \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if", "for _ in range(top_k)], \\ [[] for _ in range(top_k)] # Get model", "mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool)", "get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)", "iou_calculations import * 
# BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\",", "face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions face_pred_labels_from_tr,", "str :return: vertices: N x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels:", "str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp:", "Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n'", "= fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape", "models_fn = [] with open(split_fn, 'r') as fin: for line in fin: models_fn.append(line.strip())", "comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] +", "+ \": \" + str(np.round( point_part_iou[ label] * 100, 2)) for label in", "str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\"))", "classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) #", "model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"]", "face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr,", "points, point_gt_labels, point_pred_labels, 
point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices, faces,", "\" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for label in toplabels.values() if", "* 100, 2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n'", "face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling also face_feat_from_tr_max_pool[face] =", "1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\" # Convert numpy to dict", "== point_pred_labels.shape) # Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r')", "str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"]) +", "[best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn", "2)) + '\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all']", "Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "+ \"_label.json\")) # Log results buf += \"Point Classification Accuracy: \" + str(np.round(point_acc", "= get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp,", "2)) + '\\n' \\ \"Per label point part IoU: \" + \", \".join([label", "From Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh", "+ str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part", "str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp:", "fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,))", "# Save best and worst model label_iou = 
mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for", "model_fn in tqdm(models_fn): # Get point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index", "Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh Shape", "for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Per label", "for evaluation :param model_name: str :return: points: N x 3, numpy.ndarray(float) point_gt_labels: N", "import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet", "\"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2)) +", "iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels,", "in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data needed", "+ '\\n' \\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2))", "str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Triangles:", "Export json file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__ ==", "np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that have no", "1 # we exclude undetermined (label 0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:,", "'\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp * 100,", "+ \".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r')", "open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines() 
point_face_index = np.asarray([int(p.strip())", "return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\", "\\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\"", "get_split_models(split_fn): \"\"\" Read split.txt file and return model names :param split_fn: :return: models_fn:", "FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification", "mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float(", "+ best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] +", "toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Per label mesh part IoU", "points, unsampled) if max_pool: # unsampled faces have only one point, so max", "face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read", "\\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2))", "= np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool: # Use max", "os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") 
os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param ground:", "\" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if label !=", "best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\"))", "model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for", "2)) + '\\n' \\ \"Per label mesh part IoU from triangles: \" +", "= np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that have", "+ '\\n' \\ \"Point Part IoU - FR: \" + str( np.round(point_part_iou['fr-part'] *", "dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point features (probabilities) try:", "model iou: \" + str(best_iou_model[i]) + \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i],", "os import json import sys from tqdm import tqdm from evaluation.mesh_utils import read_obj,", "mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool", "fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components", "'\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( 
np.round(mesh_acc_from_comp_max_pool * 100,", "\" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "'\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100,", "np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool,", "= \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"),", "== int(comp))[0]] = label return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json):", "x 1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param face_area: N x", "float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool", "np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction", "face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and return model names", "= model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx]", "2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool", "\"Per label mesh part IoU from comp: \" + \", \".join([label + \":", "mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn +", "nearest point face_feat_from_tr_max_pool = 
np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled faces for", "100, 2)) + '\\n' \\ \"Point Part IoU - FR: \" + str(", "IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part", "# faces with no sample points face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer", "top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn", "cloud data needed for evaluation :param model_name: str :return: points: N x 3,", "Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with", "'\\n' \\ \"Per label mesh part IoU from triangles: \" + \", \".join(", "From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh", "str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"]) +", ":param ground: N x 1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param", "(face_area.shape[0] == faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"),", "3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat:", "mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 #", "= json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get", "point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x", "[best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model", "Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR = 
os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR", "point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k -", "2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all']", "+ '\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh", "np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \"", "fin: for line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point", "from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import *", "label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices, faces, face_labels, components,", "\" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Average Pooling\"", "+ '\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool *", "numpy.ndarray(int) \"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) #", "FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy", "= np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for", "if label != \"undetermined\"]) + '\\n' \\ \"Per label mesh part IoU from", "> best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1] 
= point_pred_labels best_model_triangles_pred[top_k", "comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] *", "in json format :param labels: N x 1, numpy.ndarray(int) :param fn_json: str :return:", "np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp- FR:", "accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp)", "components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = []", "numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M", "IoU from comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[", "face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components =", ":param face_area: N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction)", "+ '\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] *", ":param labels: N x 1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\" #", "face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file", "\"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp, label", "np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \"", "+ str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) 
for label in toplabels.values() if label", "\"_label.json\")) # Log results buf += \"Point Classification Accuracy: \" + str(np.round(point_acc *", "range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \" + str(best_iou_model[i]) + \",", "= face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx", "idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1", "data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh data vertices,", "faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles and", "= read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name", "2)) + '\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all']", "From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh", "comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that have no corresponding", "Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape", "- 1)) # Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis] + 1", "+ \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] ==", "1, numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool:", "\"undetermined\"]) + '\\n' \\ \"Per label mesh part IoU from comp: \" +", "for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred =", "float(len(label_iou)) + 
1 # handle cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k", "numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M", "axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during training face_labels_from_comp_avg_pool", "with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines() point_face_index =", "acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for", "/ np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground == prediction) / float(len(ground))", "i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \" + str(best_iou_model[i])", "np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part IoU from", "face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool", "directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR =", "nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\",", "== (len(toplabels) - 1)) # Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:, np.newaxis]", "IoU: \" + str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part", "model names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn = [] with open(split_fn,", "From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ 
\"Mesh", "buf = '' for i in range(top_k): print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model", "Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() # Transfer", "100, 2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\", "# Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist() #", "1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name +", "+ \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if", "mesh_acc_from_tr = np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc", "np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points,", "Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n'", "= np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1))", "From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\", "mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr", "have only one point, so max == avg. feat. 
, that of the", "\"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) +", "\\ [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)] #", "x 1, numpy.ndarray(int) \"\"\" # Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name +", "\\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2))", "numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N", "point, so max == avg. feat. , that of the nearest point face_feat_from_tr_max_pool", "+ str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in toplabels.values() if label", "face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x", "100, 2)) + '\\n' \\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] *", "assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point features (probabilities) try: point_feat =", "[] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0)", "# Get points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground", "exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\" Classification accuracy :param ground: N x 1,", "obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face area", "+ '\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] *", "\\ \"Per label mesh part IoU from comp: \" + \", \".join([label +", "feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + 
\".npy\"), face_feat_from_comp.astype(np.float32)) #", "numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))", "unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool: # unsampled faces", "faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format", "accuracy = np.sum(ground == prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components,", "== comp_idx).nonzero()[0] point_idx = [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)])", "face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines()", "best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx]", "IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\", "fout_json) if __name__ == \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] =", "= np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during", "2)) for label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Average", "face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format :param labels: N", "label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Average Pooling\" +", "best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in range(top_k)], \\ [[] for _", "np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU: \" + str(", "point 
and assign its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index,", "in toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR, \"results_log.txt\"), 'w')", "= np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is", "assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create", "models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for evaluation :param model_name:", "\\ \"Point Part IoU - FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2))", "axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during training assert", "# Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for", "'\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100,", "= np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point features", "len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save", "needed for evaluation :param model_name: str :return: points: N x 3, numpy.ndarray(float) point_gt_labels:", "np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf =", "str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU: \" +", "model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ 
mesh_buildings_iou_from_comp_max_pool = {},", "\\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_comp_max_pool) # Save mesh feat data", "K x 31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool", "s_iou best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k -", "import * # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\")", "= os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated mesh features FACE_FEAT_FROM_TR_DIR", "\\ [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)], \\", "str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point Shape IoU: \" + str(", "os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction, face_area=None): \"\"\"", "face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True) # Calculate", "'\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification", "return model names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn = [] with", "predictions to triangles # Find nearest point and assign its point feature to", "import tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations", "Calculate point building iou point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, 
point_pred_labels) # Calculate mesh building iou", "np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou =", "\\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\",", "# Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR", "= json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]]", "{}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {},", "1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx", "label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"]) +", "max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis]", ":return: models_fn: list(str) \"\"\" models_fn = [] with open(split_fn, 'r') as fin: for", "= label return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save", "Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n'", "max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int)", "= get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()]) / float(len(point_buildings_acc)) mesh_acc_from_tr =", "numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) 
point_face_index: N x 1, numpy.ndarray(int) \"\"\" #", "mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {} print(\"Calculate part and shape IOU for", "# Find nearest point and assign its point feature to each unsampled face", ":return: points: N x 3, numpy.ndarray(float) point_gt_labels: N x 1, numpy.ndarray(int) point_pred_labels: N", "point_idx = [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] =", "def get_split_models(split_fn): \"\"\" Read split.txt file and return model names :param split_fn: :return:", "= get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool =", "[] with open(split_fn, 'r') as fin: for line in fin: models_fn.append(line.strip()) return models_fn", "= os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR))", "comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return vertices, faces, face_labels,", "\" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i]", "mesh_part_iou_from_comp[ label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"])", "2)) + '\\n' \\ \"Point Part IoU - FR: \" + str( np.round(point_part_iou['fr-part']", 
"face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\", "+ str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU: \"", "= np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases", "\", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for", "triangles and components through avg pooling :param vertices: N x 3, numpy.ndarray(float) :param", "= np.mean(point_feat[mask], axis=0) if max_pool: # Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask],", "= [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx]", "{}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {},", "with open(split_fn, 'r') as fin: for line in fin: models_fn.append(line.strip()) return models_fn def", "best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx]", "get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool)", "BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = 
os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert", "point_buildings_iou[model_fn] = get_building_point_iou(point_gt_labels, point_pred_labels) # Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr,", "for idx in sort_idx] best_iou_model -= 1 # restore to original values #", "1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool,", "mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best", "numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with open(fn_json,", "np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU - FR: \"", "mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc in point_buildings_acc.values()])", "components: M x 1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param point_feat:", "sample points face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to triangles", "def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to", "where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k -", "np.newaxis] + 1 # we exclude undetermined (label 0) during training face_labels_from_comp_avg_pool =", "- FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per", "From Triangles: \" + str( 
np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh", "unsampled) if max_pool: # unsampled faces have only one point, so max ==", "int(comp))[0]] = label return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\"", "\"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2)) +", "BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert", "points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for", "100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str(", "\"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100,", "os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf += \"Point Classification Accuracy: \"", "part IoU from triangles: \" + \", \".join( [label + \": \" +", "best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in", "\" + \", \".join([label + \": \" + str(np.round( point_part_iou[ label] * 100,", "+ \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for", "str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp-", "x 3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K x", "= np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area)", "for line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud", "str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape IoU 
From Triangles:", "directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR,", "x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool:", "\"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred,", "idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx]", "\": \" + str(np.round( point_part_iou[ label] * 100, 2)) for label in toplabels.values()", "to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool: #", "vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) # Calculate face area faces", "components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json format :param labels:", "str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU - FR:", "if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1] =", "faces: M x 3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M x", "split.txt file and return model names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn", "# Transfer point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components", "best_model_fn[i] + \"_label.json\")) # Log results buf += \"Point Classification Accuracy: \" +", "+ \", \".join([label + \": \" + str(np.round( point_part_iou[ label] * 100, 2))", "str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles", "Read split.txt file and return model names 
:param split_fn: :return: models_fn: list(str) \"\"\"", "IoU from comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[", "mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool,", "\" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh", "np.argmax(point_feat, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0) during training", "\\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n'", "part IoU from comp: \" + \", \".join([label + \": \" + str(np.round(", "\".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model", "s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1] = point_pred_labels", "np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc", "2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all']", "line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get point cloud data", "face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool,", "Log results buf += \"Point Classification Accuracy: \" + str(np.round(point_acc * 100, 2))", "classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = 
classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\", "'\\n' \\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100,", "read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories", "'r') as fin_json: labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in", "- 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1 /", "unsampled, vertices) # Transfer point predictions to triangles # Find nearest point and", "+ \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou", "+ str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From", "IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\", "Accuracy: \" + str(np.round(point_acc * 100, 2)) + '\\n' \\ \"Point Shape IoU:", "Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best", "+ '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i],", "with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels =", "dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w') as", "sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx", "bool :return: 
face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M x 1, numpy.ndarray(int) face_feat_from_tr_avg_pool:", "x 1, numpy.ndarray(float) :return: accuracy: float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground)", "os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf", "2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all']", "(point_feat.shape[1] == (len(toplabels) - 1)) # Calculate pred label point_pred_labels = np.argmax(point_feat, axis=1)[:,", "np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces", "np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if", "face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude", "Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] =", ":param point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param", "vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in json", "numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int)", "x 31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool :return:", "label_iou = 
mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou)) +", "1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\"", "(os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True)", "\" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU", "'w') as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k = 200 best_iou_model", "float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) #", "From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n'", "np.newaxis] + 1 # we exclude undetermined (label 0) during training assert (point_gt_labels.shape", "[[] for _ in range(top_k)], \\ [[] for _ in range(top_k)], \\ [[]", "\"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) +", "'\\n' \\ \"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part']", "max_pool: # Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] = mask.nonzero()[0].tolist()", "\".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou =", "+ '\\n' \\ \"Per label mesh part IoU from comp: \" + \",", "IOU for point and mesh tracks\") for model_fn in tqdm(models_fn): # Get point", "face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx in face_idx: try:", "import sys from tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply, 
calculate_face_area, compute_face_centers,", "+ \".txt\"), 'r') as fin_txt: point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p", "+ '\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] *", "IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) +", "label point part IoU: \" + \", \".join([label + \": \" + str(np.round(", "\"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) +", "best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in", "point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation :param model_name: str", "+ \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst", "\\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2))", "faces have only one point, so max == avg. feat. 
, that of", "point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape == point_gt_labels.shape) return points, point_gt_labels, point_pred_labels, point_feat, point_face_index", "json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per", "best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR,", "= [[] for _ in range(top_k)], \\ [[] for _ in range(top_k)], \\", "triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100,", "/ np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred", "\"\"\" Read split.txt file and return model names :param split_fn: :return: models_fn: list(str)", "[[] for _ in range(top_k)], \\ [[] for _ in range(top_k)] # Get", "np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 #", "_ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR,", "for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn =", "\"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR =", "* 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp- FR: \"", "= 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[] for _ in 
range(top_k)], \\", "+ '\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\ \"Mesh", "np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool,", "one point, so max == avg. feat. , that of the nearest point", "2)) + '\\n' \\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp", "[best_model_fn[idx] for idx in sort_idx] best_iou_model -= 1 # restore to original values", "directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) # Create directories for best results BEST_POINTS_DIR", "points: K x 3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param point_face_index:", "{} print(\"Calculate part and shape IOU for point and mesh tracks\") for model_fn", "1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\", "get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\", ":param faces: M x 3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param", "+ str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From", "= classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] =", "print(best_iou_model[i]); print(best_model_fn[i]) buf += \"Best model iou: \" + str(best_iou_model[i]) + \", \"", "face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) 
comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that", "+ '\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] *", "{} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp, mesh_buildings_acc_from_tr_max_pool, \\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {}", "3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param point_face_index: K x 1,", "np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles -", "/ float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for i in range(top_k):", "prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False):", "components through avg pooling :param vertices: N x 3, numpy.ndarray(float) :param faces: M", "'\\n' \\ \"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part']", "Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n'", "# Get per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\"))", "evaluation :param model_name: str :return: vertices: N x 3, numpy.ndarray(float) faces: M x", "idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx]", "avg pooling :param vertices: N x 3, numpy.ndarray(float) :param faces: M x 3,", "# Calculate face area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0]", "face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool =", "fn_json: str :return: None \"\"\" # Convert 
numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str),", "shape IOU for point and mesh tracks\") for model_fn in tqdm(models_fn): # Get", "/ float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool))", "\\ \"Mesh Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] *", "= {}, {}, {}, {}, {} print(\"Calculate part and shape IOU for point", "numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float)", "and shape IOU for point and mesh tracks\") for model_fn in tqdm(models_fn): #", "iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1]", "of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) # Use avg pooling for sampled", "'\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification", "numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index: N", "label mesh part IoU from comp: \" + \", \".join([label + \": \"", "triangles # Find nearest point and assign its point feature to each unsampled", "undetermined (label 0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if", "acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_comp_max_pool.values()])", "1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn", "BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1]", "= 
np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for", "as fin: for line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name): \"\"\" Get", "os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create directories for aggregated", "face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components to labels", "'\\n' \\ \"Point Part IoU: \" + str( np.round(point_part_iou['all'] * 100, 2)) +", "np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx]", "'\\n' \\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100,", "list(set(np.arange(len(faces))) - sampled) # faces with no sample points face_centers = compute_face_centers(faces, unsampled,", "get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] =", "open(split_fn, 'r') as fin: for line in fin: models_fn.append(line.strip()) return models_fn def get_point_cloud_data(model_name):", "for _ in range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr,", ":return: vertices: N x 3, numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M", "\"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log results buf += \"Point Classification", "transfer_point_predictions(vertices, faces, components, points, 
point_feat, point_face_index, max_pool=True) # Calculate point building iou point_buildings_iou[model_fn]", "# Calculate classification accuracy point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn]", "== faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r')", "np.sum([v for v in label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where", "np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not", "prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground == prediction) /", "BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) #", "print(\"Calculate part and shape IOU for point and mesh tracks\") for model_fn in", "Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name + \".txt\"), 'r') as fin_txt: point_face_index", "\" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2))", "- 1] = point_pred_labels best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] =", "best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in", "in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except: point_idx.append(face_point_index[int(idx)]) comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx]", "part and shape IOU for point and mesh tracks\") for model_fn in 
tqdm(models_fn):", "+ str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part", "prediction: N x 1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return: accuracy:", "Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2)) + '\\n' \\", "labels: N x 1, numpy.ndarray(int) :param fn_json: str :return: None \"\"\" # Convert", "= os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network", "np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \"", "label != \"undetermined\"]) + '\\n' \\ \"Average Pooling\" + '\\n' \\ \"---------------\" +", "face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True)", "\" + str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh Shape IoU", "json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:]", "mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"])", "K x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int)", "json import sys from tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area,", "axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx]", "(probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat = 
np.zeros((point_gt_labels.shape[0],", "np.copy(face_area) face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy", "file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\": top_k", "= get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc for acc", "'\\n' \\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100,", "_ in range(top_k)], \\ [[] for _ in range(top_k)], \\ [[] for _", "+ \": \" + str(np.round( mesh_part_iou_from_comp_max_pool[ label][0] * 100, 2)) for label in", "= classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn]", "face_area = face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy =", "'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0]", "(os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert", "face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def 
get_split_models(split_fn): \"\"\" Read split.txt", "classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] =", "= get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool =", "point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point", "Comp: \" + str( np.round(mesh_acc_from_comp_max_pool * 100, 2)) + '\\n' \\ \"Mesh Shape", "= np.zeros_like(comp_feat_avg_pool) face_point_index = {} # Find faces that have no corresponding points", "Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool =", "label in toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\" +", "models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {},", "* 100, 2)) + '\\n' \\ \"Per label point part IoU: \" +", "mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for i in", "\\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str(", "= os.path.join(NET_RESULTS_DIR, \"best_triangles\") 
os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True) # Create", "fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert (point_face_index.shape ==", "\" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy", "only one point, so max == avg. feat. , that of the nearest", "\\ \"Mesh Shape IoU From Triangles: \" + str( np.round(mesh_shape_iou_from_tr['all'] * 100, 2))", "face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else:", "\".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for label", "/ float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\"", "'\\n' \\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] *", "return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point", "* # BuildNet directories BUILDNET_BASE_DIR = os.path.join(os.sep, \"media\", \"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert", "Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp, face_feat_from_tr, face_feat_from_comp, \\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool", "point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed for evaluation :param model_name:", "face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool: # unsampled faces have", "\\ \"Mesh Part IoU From Comp- FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100,", "to 
components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx =", "+ str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per label point part", "if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool = np.zeros_like(face_feat_from_comp_avg_pool) comp_feat_max_pool = np.zeros_like(comp_feat_avg_pool) face_point_index =", "x 3, numpy.ndarray(float) :param faces: M x 3, numpy.ndarray(int) :param components: M x", "# handle cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1] =", "float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0]", "vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from point", "face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn]", "and assign its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat,", "\\ \"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr_max_pool * 100, 2))", "face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best and worst model label_iou", ":return: None \"\"\" # Convert numpy to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) #", "- 1] = s_iou best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k - 1] =", "x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) 
face_feat_from_tr_avg_pool", "Triangles: \" + str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR, model_name", "Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\", "[best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred", "components, face_area = get_mesh_data_n_labels(model_fn) # Infer face labels from point predictions face_pred_labels_from_tr, face_pred_labels_from_comp,", "tqdm(models_fn): # Get point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn)", "- 1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred", "if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] = comp_feat_max_pool[comp_idx] face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:,", "= [] with open(split_fn, 'r') as fin: for line in fin: models_fn.append(line.strip()) return", "= [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx]", "str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for label in toplabels.values() if label !=", "point_feat, points, unsampled) if max_pool: # unsampled faces have only one point, so", "\"\"\" # Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR, model_name + \".obj\")) #", "float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool", "1, 
numpy.ndarray(int) components: M x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\"", "\"Mesh Classification Accuracy From Triangles: \" + str( np.round(mesh_acc_from_tr * 100, 2)) +", "+ str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From", "point_buildings_acc[model_fn] = classification_accuracy(point_gt_labels, point_pred_labels) mesh_buildings_acc_from_tr[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_tr) mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn]", "point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1,", "\", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR,", "\"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] + \"_label.json\")) # Log", "try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels)", "\\ mesh_buildings_acc_from_comp_max_pool = {}, {}, {}, {}, {} print(\"Calculate part and shape IOU", "point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat", "np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert", "1, numpy.ndarray(int) point_pred_labels: N x 1, numpy.ndarray(int) point_pred_feat: N x 31, numpy.ndarray(float) point_face_index:", "mesh part IoU from triangles: 
\" + \", \".join( [label + \": \"", "= np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground = ground[non_zero_idx]", "\"maria\", \"BigData1\", \"Maria\", \"buildnet_data_2k\") assert (os.path.isdir(BUILDNET_BASE_DIR)) BUILDNET_OBJ_DIR = os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR", "+ str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"])", "get_point_cloud_data(model_name): \"\"\" Get point cloud data needed for evaluation :param model_name: str :return:", "IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\", "= get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool =", "= os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR))", "\\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" + str(", "= face_area[non_zero_idx] accuracy = np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0]", "\\ \"Mesh Classification Accuracy From Comp: \" + str( np.round(mesh_acc_from_comp * 100, 2))", "get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = 
get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) point_acc = np.sum([acc", "in mesh_buildings_acc_from_comp_max_pool.values()]) / float( len(mesh_buildings_acc_from_comp_max_pool)) # Save best buf = '' for i", "split_fn: :return: models_fn: list(str) \"\"\" models_fn = [] with open(split_fn, 'r') as fin:", "np.sum([acc for acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc", "get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool)", "numpy.ndarray(float) point_face_index: N x 1, numpy.ndarray(int) \"\"\" # Get points points, _ =", "for label in toplabels.values() if label != \"undetermined\"]) + '\\n' print(buf) with open(os.path.join(NET_RESULTS_DIR,", "np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc", "labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(), dtype=int)[:, np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) #", "np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool", "+ str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "we exclude undetermined (label 0) during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get", "\\ \"Mesh Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 
100, 2))", "0).nonzero()[0] ground = ground[non_zero_idx] prediction = prediction[non_zero_idx] if face_area is not None: face_area", "from comp: \" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0]", "+ str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy From", "np.newaxis] assert (points.shape[0] == point_gt_labels.shape[0]) # Get per point features (probabilities) try: point_feat", "\": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100, 2)) for label in toplabels.values()", "names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {},", "max_pool: # unsampled faces have only one point, so max == avg. feat.", "comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx", "1 # handle cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k - 1]", ":param components: M x 1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param", "Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles:", "comp_feat_avg_pool[comp_idx] = np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0)", "FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1]", "for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc for acc in", "labels_json = json.load(fin_json) face_labels = np.zeros_like(components) for comp, label in labels_json.items(): 
face_labels[np.where(components ==", "get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp)", "\" + str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU:", "point part IoU: \" + \", \".join([label + \": \" + str(np.round( point_part_iou[", "* 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles - FR:", "through avg pooling :param vertices: N x 3, numpy.ndarray(float) :param faces: M x", "M x 1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components = read_obj(obj_fn=os.path.join(BUILDNET_OBJ_DIR,", "'\\n' \\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2)) +", "# Log results buf += \"Point Classification Accuracy: \" + str(np.round(point_acc * 100,", "1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param face_area: N x 1,", "'\\n' \\ \"Per label point part IoU: \" + \", \".join([label + \":", "+ \", \" + best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i],", "np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Triangles: \"", "= [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx in sort_idx]", "area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0]) #", "per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR, model_fn + \".npy\")) except FileNotFoundError:", "1 return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, 
face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool,", "acc in mesh_buildings_acc_from_tr.values()]) / float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()])", "and components through avg pooling :param vertices: N x 3, numpy.ndarray(float) :param faces:", "face_pred_labels_from_comp best_model_fn[top_k - 1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model =", "mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()]) / float( len(mesh_buildings_acc_from_tr_max_pool)) mesh_acc_from_comp_max_pool = np.sum([acc", "sampled = set(point_face_index.flatten()) unsampled = list(set(np.arange(len(faces))) - sampled) # faces with no sample", "model label_iou = mesh_buildings_iou_from_comp[model_fn][\"label_iou\"] s_iou = np.sum([v for v in label_iou.values()]) / float(len(label_iou))", "json file with open(fn_json, 'w') as fout_json: json.dump(labels_json, fout_json) if __name__ == \"__main__\":", "os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_comp_pred[i], os.path.join(BEST_COMP_DIR, best_model_fn[i] +", "numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components", "nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled) if max_pool: # unsampled faces have only", "= mask.nonzero()[0].tolist() # Transfer point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx", "face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ 
face_feat_from_comp_avg_pool def get_split_models(split_fn): \"\"\" Read split.txt file and return", "its point feature to each unsampled face nearest_neighbour_of_face_centers(face_centers, face_feat_from_tr_avg_pool, face_point_index, point_feat, points, unsampled)", "+ str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "os.path.join(BUILDNET_BASE_DIR, \"flippedNormal_unit_obj_withtexture\") assert (os.path.isdir(BUILDNET_OBJ_DIR)) BUILDNET_PTS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR =", "best_model_fn[i] + '\\n' save_pred_in_json(best_model_points_pred[i], os.path.join(BEST_POINTS_DIR, best_model_fn[i] + \"_label.json\")) save_pred_in_json(best_model_triangles_pred[i], os.path.join(BEST_TRIANGLES_DIR, best_model_fn[i] + \"_label.json\"))", "faces=faces) assert (face_area.shape[0] == faces.shape[0]) # Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name", "if max_pool: # Use max pooling also face_feat_from_tr_max_pool[face] = np.amax(point_feat[mask], axis=0) face_point_index[face] =", "points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles and components through", "except FileNotFoundError: point_feat = np.zeros((point_gt_labels.shape[0], len(toplabels) - 1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert", "+ 1 # handle cases where iou=0 if s_iou > best_iou_model[-1]: best_iou_model[top_k -", "os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT =", "Shape IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n'", "\".join([label + \": \" + str(np.round( point_part_iou[ label] * 100, 2)) for 
label", "\" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Mesh Classification Accuracy", "point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp", "exclude undetermined (label 0) during training face_labels_from_comp_avg_pool = np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1", "* 100, 2)) + '\\n' \\ \"Point Shape IoU: \" + str( np.round(point_shape_iou['all']", "\"Per label point part IoU: \" + \", \".join([label + \": \" +", "np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx in sort_idx] best_model_triangles_pred =", "acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for acc in mesh_buildings_acc_from_tr_max_pool.values()])", "sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx] for idx", "return vertices, faces, face_labels, components, face_area def save_pred_in_json(labels, fn_json): \"\"\" Save labels in", "toplabels.values() if label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n' \\", ":param points: K x 3, numpy.ndarray(float) :param point_feat: K x 31, numpy.ndarray(float) :param", "\\ face_pred_labels_from_tr_max_pool, face_pred_labels_from_comp_max_pool = \\ transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=True) #", "1)) assert (point_feat.shape[0] == point_gt_labels.shape[0]) assert (point_feat.shape[1] == (len(toplabels) - 1)) # Calculate", "os.path.join(NET_RESULTS_DIR, \"face_feat_from_tr\") os.makedirs(FACE_FEAT_FROM_TR_DIR, exist_ok=True) 
FACE_FEAT_FROM_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"face_feat_from_comp\") os.makedirs(FACE_FEAT_FROM_COMP_DIR, exist_ok=True) def classification_accuracy(ground, prediction,", "numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces, components", "face_labels_from_tr_avg_pool = np.argmax(face_feat_from_tr_avg_pool, axis=1)[:, np.newaxis] + 1 # we exclude undetermined (label 0)", "M x 3, numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param points: K", "+ str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "From Comp: \" + str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh", "\\ \"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2))", "in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for idx in sort_idx] best_model_fn = [best_model_fn[idx] for", "during training assert (point_gt_labels.shape == point_pred_labels.shape) # Get points face index with open(os.path.join(BUILDNET_PTS_FACEINDEX_DIR,", "\\ \"Average Pooling\" + '\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy", "'\\n' \\ \"---------------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles: \" +", "face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to triangles # Find", "\".ply\")) # Get ground truth labels with open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as", "point cloud data points, point_gt_labels, point_pred_labels, point_feat, point_face_index = get_point_cloud_data(model_fn) # Get mesh", "in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if max_pool:", "for best results BEST_POINTS_DIR = os.path.join(NET_RESULTS_DIR, \"best_points\") os.makedirs(BEST_POINTS_DIR, exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, 
\"best_triangles\")", "== avg. feat. , that of the nearest point face_feat_from_tr_max_pool = np.copy(face_feat_from_tr_avg_pool) #", "x 1, numpy.ndarray(int) :param face_area: N x 1, numpy.ndarray(float) :return: accuracy: float \"\"\"", "labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w') as fout_json:", "label] * 100, 2)) for label in toplabels.values() if label != \"undetermined\"]) +", "face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1]))", "buf += \"Point Classification Accuracy: \" + str(np.round(point_acc * 100, 2)) + '\\n'", "str( np.round(mesh_part_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Comp-", "face_area=None): \"\"\" Classification accuracy :param ground: N x 1, numpy.ndarray(int) :param prediction: N", "\" + str( np.round(point_part_iou['fr-part'] * 100, 2)) + '\\n' \\ \"Per label point", "x 31, numpy.ndarray(float) face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M x 1, numpy.ndarray(int)", "IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\", "np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh part IoU from", "numpy.ndarray(int) :param components: M x 1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float)", "100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles: \" + str(", "\" + str( np.round(mesh_acc_from_tr * 100, 2)) + '\\n' \\ \"Mesh Shape IoU", "point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp", "= 
np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp)) mesh_acc_from_tr_max_pool = np.sum([acc for", "numpy.ndarray(float) faces: M x 3, numpy.ndarray(int) face_labels: M x 1, numpy.ndarray(int) components: M", "1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0], point_feat.shape[1])) face_feat_from_comp_avg_pool = np.zeros((faces.shape[0],", "BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) #", "np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for idx in face_idx: try: point_idx.extend(face_point_index[int(idx)]) except:", "(points.shape[0] == point_gt_labels.shape[0]) # Get per point features (probabilities) try: point_feat = np.load(os.path.join(NET_RESULTS_DIR,", "exist_ok=True) BEST_TRIANGLES_DIR = os.path.join(NET_RESULTS_DIR, \"best_triangles\") os.makedirs(BEST_TRIANGLES_DIR, exist_ok=True) BEST_COMP_DIR = os.path.join(NET_RESULTS_DIR, \"best_comp\") os.makedirs(BEST_COMP_DIR, exist_ok=True)", "+ \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr[label][0] * 100, 2)) for", "os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"nocolor\") assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR", "faces, components, points, point_feat, point_face_index, max_pool=True) # Calculate point building iou point_buildings_iou[model_fn] =", "\"Point Part IoU - FR: \" + str( np.round(point_part_iou['fr-part'] * 100, 2)) +", "+ '\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] *", "in sort_idx] best_iou_model -= 1 # restore to original values # Calculate avg", 
"in sort_idx] best_model_triangles_pred = [best_model_triangles_pred[idx] for idx in sort_idx] best_model_comp_pred = [best_model_comp_pred[idx] for", "best_model_triangles_pred[top_k - 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1]", "data needed for evaluation :param model_name: str :return: vertices: N x 3, numpy.ndarray(float)", "labels in json format :param labels: N x 1, numpy.ndarray(int) :param fn_json: str", "mesh tracks\") for model_fn in tqdm(models_fn): # Get point cloud data points, point_gt_labels,", "np.dot(face_area.T, ground == prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground", "else: accuracy = np.sum(ground == prediction) / float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces,", "get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp)", "assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\")", "point predictions to components for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0]", "1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x 1, numpy.ndarray(int) face_labels_from_comp_avg_pool: M", "float(len(ground)) return accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer", "point part and shape 
IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr =", "return points, point_gt_labels, point_pred_labels, point_feat, point_face_index def get_mesh_data_n_labels(model_name): \"\"\" Get mesh data needed", "open(os.path.join(BUILDNET_PTS_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json = json.load(fin_json) point_gt_labels = np.fromiter(labels_json.values(),", "mesh data needed for evaluation :param model_name: str :return: vertices: N x 3,", "to dict labels_json = dict(zip(np.arange(labels.shape[0]).astype(str), np.squeeze(labels).tolist())) # Export json file with open(fn_json, 'w')", "/ float( len(mesh_buildings_acc_from_tr)) mesh_acc_from_comp = np.sum([acc for acc in mesh_buildings_acc_from_comp.values()]) / float( len(mesh_buildings_acc_from_comp))", "np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn + \".npy\"), face_feat_from_comp.astype(np.float32)) # Save best", "1] = model_fn sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred =", "\"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR = sys.argv[1] assert (os.path.isdir(NET_RESULTS_DIR)) #", "# Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool", "x 1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" # Load obj vertices,", "(os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT)) # Network results directory NET_RESULTS_DIR =", 
"mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp, face_area) mesh_buildings_iou_from_tr_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr_max_pool, face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] =", "IoU From Comp: \" + str( np.round(mesh_shape_iou_from_comp_max_pool['all'] * 100, 2)) + '\\n' \\", "\"Mesh Part IoU From Comp: \" + str( np.round(mesh_part_iou_from_comp['all'] * 100, 2)) +", ":return: accuracy: float \"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground", "Transfer point predictions to triangles # Find nearest point and assign its point", "mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {}, {} point_buildings_acc, mesh_buildings_acc_from_tr, mesh_buildings_acc_from_comp,", "'\\n' \\ \"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr_max_pool['all'] * 100,", "31, numpy.ndarray(float) :param point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool:", "in range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool,", "str(np.round( point_part_iou[ label] * 100, 2)) for label in toplabels.values() if label !=", "data needed for evaluation :param model_name: str :return: points: N x 3, numpy.ndarray(float)", "for comp_idx in range(comp_feat_avg_pool.shape[0]): face_idx = np.squeeze(components == comp_idx).nonzero()[0] point_idx = [] for", "1, numpy.ndarray(float) face_area: M x 1, numpy.ndarray(float) \"\"\" # Load obj vertices, faces,", "and shape IOU point_shape_iou = get_shape_iou(buildings_iou=point_buildings_iou) point_part_iou = get_part_iou(buildings_iou=point_buildings_iou) mesh_shape_iou_from_tr = 
get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_part_iou_from_tr", "sort_idx] best_iou_model -= 1 # restore to original values # Calculate avg point", "\"Point Shape IoU: \" + str( np.round(point_shape_iou['all'] * 100, 2)) + '\\n' \\", "(os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR = os.path.join(BUILDNET_BASE_DIR, \"dataset\") assert (os.path.isdir(BUILDNET_SPLITS_DIR)) BUILDNET_TEST_SPLIT = os.path.join(BUILDNET_SPLITS_DIR, \"test_split.txt\") assert (os.path.isfile(BUILDNET_TEST_SPLIT))", "accuracy = accuracy[0] else: accuracy = np.sum(ground == prediction) / float(len(ground)) return accuracy", "and return model names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn = []", "Calculate mesh building iou mesh_buildings_iou_from_tr[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_tr, face_area) mesh_buildings_iou_from_comp[model_fn] = get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp,", "numpy.ndarray(int) face_labels_from_comp_max_pool: M x 1, numpy.ndarray(int) \"\"\" n_components = len(np.unique(components)) face_feat_from_tr_avg_pool = np.zeros((faces.shape[0],", "FR: \" + str( np.round(mesh_part_iou_from_comp['fr-part'] * 100, 2)) + '\\n' \\ \"Per label", "face in sampled: mask = np.squeeze(point_face_index == face) face_feat_from_tr_avg_pool[face] = np.mean(point_feat[mask], axis=0) if", "tqdm import tqdm from evaluation.mesh_utils import read_obj, read_ply, calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from", "Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr_max_pool['fr-part'] * 100, 2))", "point_face_index = fin_txt.readlines() point_face_index = np.asarray([int(p.strip()) for p in point_face_index], dtype=int)[:, np.newaxis] assert", "= get_part_iou(buildings_iou=mesh_buildings_iou_from_tr) mesh_shape_iou_from_comp = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_part_iou_from_comp = 
get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool =", "\" + str( np.round(mesh_part_iou_from_comp_max_pool['fr-part'] * 100, 2)) + '\\n' \\ \"Per label mesh", "face_pred_labels_from_comp_max_pool) # Save mesh feat data np.save(os.path.join(FACE_FEAT_FROM_TR_DIR, model_fn + \".npy\"), face_feat_from_tr.astype(np.float32)) np.save(os.path.join(FACE_FEAT_FROM_COMP_DIR, model_fn", "best_iou_model[top_k - 1] = s_iou best_model_points_pred[top_k - 1] = point_pred_labels best_model_triangles_pred[top_k - 1]", "triangles: \" + \", \".join( [label + \": \" + str(np.round(mesh_part_iou_from_tr_max_pool[label][0] * 100,", "max_pool=False): \"\"\" Transfer point predictions to triangles and components through avg pooling :param", "label_iou.values()]) / float(len(label_iou)) + 1 # handle cases where iou=0 if s_iou >", "face area faces -= 1 face_area = calculate_face_area(vertices=vertices, faces=faces) assert (face_area.shape[0] == faces.shape[0])", "mesh_buildings_acc_from_comp[model_fn] = classification_accuracy(face_gt_labels, face_pred_labels_from_comp) mesh_buildings_acc_from_tr_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels, face_pred_labels_from_tr_max_pool) mesh_buildings_acc_from_comp_max_pool[model_fn] = \\ classification_accuracy(face_gt_labels,", "np.mean(point_feat[point_idx], axis=0) face_feat_from_comp_avg_pool[face_idx] = comp_feat_avg_pool[comp_idx] if max_pool: comp_feat_max_pool[comp_idx] = np.amax(point_feat[point_idx], axis=0) face_feat_from_comp_max_pool[face_idx] =", "= np.argmax(face_feat_from_comp_avg_pool, axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis]", "accuracy def transfer_point_predictions(vertices, faces, components, points, point_feat, point_face_index, max_pool=False): \"\"\" Transfer point predictions", 
"points points, _ = read_ply(os.path.join(BUILDNET_PTS_DIR, model_name + \".ply\")) # Get ground truth labels", "Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh Part", "N x 1, numpy.ndarray(int) :param prediction: N x 1, numpy.ndarray(int) :param face_area: N", "calculate_face_area, compute_face_centers, \\ nearest_neighbour_of_face_centers from iou_calculations import * # BuildNet directories BUILDNET_BASE_DIR =", "200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred, best_model_triangles_pred, best_model_comp_pred, best_model_fn = [[]", "x 1, numpy.ndarray(int) :param points: K x 3, numpy.ndarray(float) :param point_feat: K x", "+ str( np.round(point_part_iou['all'] * 100, 2)) + '\\n' \\ \"Point Part IoU -", "point_face_index, max_pool=False): \"\"\" Transfer point predictions to triangles and components through avg pooling", "\"\"\" Save labels in json format :param labels: N x 1, numpy.ndarray(int) :param", "sort_idx = np.argsort(1 / np.asarray(best_iou_model)).tolist() best_iou_model = best_iou_model[sort_idx] best_model_points_pred = [best_model_points_pred[idx] for idx", "Part IoU From Triangles - FR: \" + str( np.round(mesh_part_iou_from_tr['fr-part'] * 100, 2))", "Get mesh data vertices, faces, face_gt_labels, components, face_area = get_mesh_data_n_labels(model_fn) # Infer face", "face_area) mesh_buildings_iou_from_comp_max_pool[model_fn] = \\ get_building_mesh_iou(face_gt_labels, face_pred_labels_from_comp_max_pool, face_area) # Calculate classification accuracy point_buildings_acc[model_fn] =", "face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool, face_labels_from_tr_max_pool, face_labels_from_comp_max_pool return face_labels_from_tr_avg_pool, face_labels_from_comp_avg_pool, face_feat_from_tr_avg_pool, \\ face_feat_from_comp_avg_pool def", "numpy.ndarray(int) face_feat_from_tr_avg_pool: M x 31, numpy.ndarray(float) 
face_feat_from_comp_avg_pool: M x 31, numpy.ndarray(float) face_labels_from_triangle_max_pool: M", "np.zeros((faces.shape[0], point_feat.shape[1])) comp_feat_avg_pool = np.zeros((n_components, point_feat.shape[1])) if max_pool: face_feat_from_tr_max_pool = np.zeros_like(face_feat_from_tr_avg_pool) face_feat_from_comp_max_pool =", "\" + \", \".join([label + \": \" + str(np.round( mesh_part_iou_from_comp[ label][0] * 100,", ":param point_face_index: K x 1, numpy.ndarray(int) :param max_pool: bool :return: face_labels_from_triangle_avg_pool: M x", "assert (BUILDNET_PTS_DIR) BUILDNET_PTS_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"point_labels_32\") assert (BUILDNET_PTS_LABELS_DIR) BUILDNET_PTS_FACEINDEX_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\",", "* 100, 2)) + '\\n' \\ \"Point Part IoU: \" + str( np.round(point_part_iou['all']", "Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\ \"Mesh Classification Accuracy From Triangles:", "buf += \"Best model iou: \" + str(best_iou_model[i]) + \", \" + best_model_fn[i]", "label != \"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\" +", "mesh_part_iou_from_comp = get_part_iou(buildings_iou=mesh_buildings_iou_from_comp) mesh_shape_iou_from_tr_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_part_iou_from_tr_max_pool = get_part_iou(buildings_iou=mesh_buildings_iou_from_tr_max_pool) mesh_shape_iou_from_comp_max_pool = get_shape_iou(buildings_iou=mesh_buildings_iou_from_comp_max_pool) mesh_part_iou_from_comp_max_pool", "str( np.round(mesh_acc_from_comp * 100, 2)) + '\\n' \\ \"Mesh Shape IoU From Comp:", "== prediction)[0] / np.sum(face_area) accuracy = accuracy[0] else: accuracy = np.sum(ground == prediction)", "names :param split_fn: :return: models_fn: list(str) \"\"\" models_fn = [] with open(split_fn, 'r')", "= get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou, mesh_buildings_iou_from_tr, 
mesh_buildings_iou_from_comp, mesh_buildings_iou_from_tr_max_pool, \\ mesh_buildings_iou_from_comp_max_pool = {}, {}, {}, {},", "= np.zeros_like(components) for comp, label in labels_json.items(): face_labels[np.where(components == int(comp))[0]] = label return", "+ str( np.round(mesh_shape_iou_from_comp['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From", "\"100K_inverted_normals\", \"faceindex\") assert (os.path.isdir(BUILDNET_PTS_FACEINDEX_DIR)) BUILDNET_COMP_TO_LABELS_DIR = os.path.join(BUILDNET_BASE_DIR, \"100K_inverted_normals\", \"component_label_32\") assert (os.path.isdir(BUILDNET_COMP_TO_LABELS_DIR)) BUILDNET_SPLITS_DIR =", "From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) + '\\n' \\ \"Mesh", "\"undetermined\"]) + '\\n' \\ \"Max Pooling\" + '\\n' \\ \"-----------\" + '\\n' \\", "__name__ == \"__main__\": top_k = 200 best_iou_model = np.zeros((top_k,)) best_iou_model[:] = 1e-9 best_model_points_pred,", "no sample points face_centers = compute_face_centers(faces, unsampled, vertices) # Transfer point predictions to", "- 1] = face_pred_labels_from_tr best_model_comp_pred[top_k - 1] = face_pred_labels_from_comp best_model_fn[top_k - 1] =", "axis=1)[:, np.newaxis] + 1 if max_pool: face_labels_from_tr_max_pool = np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1", "Read components to labels with open(os.path.join(BUILDNET_COMP_TO_LABELS_DIR, model_name + \"_label.json\"), 'r') as fin_json: labels_json", "[[] for _ in range(top_k)] # Get model names models_fn = get_split_models(split_fn=BUILDNET_TEST_SPLIT) point_buildings_iou,", "\"\"\" prediction = np.copy(prediction) ground = np.copy(ground) non_zero_idx = np.squeeze(ground != 0).nonzero()[0] ground", "str( np.round(mesh_shape_iou_from_tr_max_pool['all'] * 100, 2)) + '\\n' \\ \"Mesh Part IoU From Triangles:", "_ in range(top_k)], \\ [[] for _ in range(top_k)] # Get model names", "\"Mesh Part IoU From Triangles: \" + str( np.round(mesh_part_iou_from_tr['all'] * 100, 2)) 
+", "np.argmax(face_feat_from_tr_max_pool, axis=1)[:, np.newaxis] + 1 face_labels_from_comp_max_pool = np.argmax(face_feat_from_comp_max_pool, axis=1)[:, np.newaxis] + 1 return" ]
[ "for t, counter in time_counters: cur_n += counter max_n = max(max_n, cur_n) return", "Output: 1. intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) if", "times and add increment/decrement counters by start/end. time_counters = [] for i in", "array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si", ">= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to min heap. heapq.heappush(end_minhq, intervals[i][1])", "time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals):", "bisect import insort # Sort times and add increment/decrement counters by start/end. time_counters", "def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space", "0, 0 for t, counter in time_counters: cur_n += counter max_n = max(max_n,", "[2, 4]] Output: 1 Explanation: Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object):", "minimum number of conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2", "time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to min", "URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting of start and", "Explanation: We need two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals", "is after min end time, remove min end time. if intervals[i][0] >= end_minhq[0]:", "If next start time is after min end time, remove min end time.", "find the minimum number of conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]]", "times. 
end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): # If", "Sort times and add increment/decrement counters by start/end. time_counters = [] for i", "= [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n,", "for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n =", "O(n). \"\"\" import heapq if not intervals or not intervals[0]: return 0 #", "# Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1.", "meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype:", "class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity:", "end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms", "end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): #", "start time is after min end time, remove min end time. if intervals[i][0]", "Space complexity: O(n). \"\"\" from bisect import insort # Sort times and add", "return max_n def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print", "rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two", "of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number", ":type intervals: List[List[int]] :rtype: int Time complexity: O(n). Space complexity: O(n). \"\"\" from", "O(n). Space complexity: O(n). 
\"\"\" from bisect import insort # Sort times and", "in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0", "not intervals or not intervals[0]: return 0 # Sort intervals by start time.", "1 Explanation: Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals):", "complexity: O(n). Space complexity: O(n). \"\"\" from bisect import insort # Sort times", "\"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time", "cur_n) return max_n def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals)", "of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si <", "meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei),", "Example2 Input: intervals = [[7, 10], [2, 4]] Output: 1 Explanation: Only need", "<gh_stars>1-10 \"\"\"Leetcode 253. Meeting Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array", "by start time. intervals.sort() # Use min heap to store end times. end_minhq", "2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals =", "print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals = [[7, 10], [2, 4]]", "SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n).", "complexity: O(n). 
\"\"\" from bisect import insort # Sort times and add increment/decrement", "Meeting Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time", "need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals:", "store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)):", "1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0 for t, counter in", "intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find the", "(intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0 for t, counter", "and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference", "time. intervals.sort() # Use min heap to store end times. end_minhq = []", "Space complexity: O(n). \"\"\" import heapq if not intervals or not intervals[0]: return", "t, counter in time_counters: cur_n += counter max_n = max(max_n, cur_n) return max_n", "counter in time_counters: cur_n += counter max_n = max(max_n, cur_n) return max_n def", "# Output: 1. intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals)", "time is after min end time, remove min end time. if intervals[i][0] >=", "Explanation: Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\"", ":rtype: int Time complexity: O(n). Space complexity: O(n). \"\"\" from bisect import insort", "\"\"\" from bisect import insort # Sort times and add increment/decrement counters by", "time_counters: cur_n += counter max_n = max(max_n, cur_n) return max_n def main(): #", "print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. 
intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals)", "heap to store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in", "i in range(1, len(intervals)): # If next start time is after min end", "int Time complexity: O(n). Space complexity: O(n). \"\"\" from bisect import insort #", "from bisect import insort # Sort times and add increment/decrement counters by start/end.", "class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity:", "time_counters = [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1))", "253. Meeting Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting", "= max(max_n, cur_n) return max_n def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]]", "= [[7, 10], [2, 4]] Output: 1 Explanation: Only need one meeting room", "[] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): # If next start time", "return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int", "intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to min heap. heapq.heappush(end_minhq,", "start and end times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of", "required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting", "0 # Sort intervals by start time. intervals.sort() # Use min heap to", "= [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): # If next start", "intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n). 
Space complexity: O(n).", "-1)) cur_n, max_n = 0, 0 for t, counter in time_counters: cur_n +=", "ei), find the minimum number of conference rooms required. Example1 Input: intervals =", "intervals = [[7, 10], [2, 4]] Output: 1 Explanation: Only need one meeting", "len(intervals)): # If next start time is after min end time, remove min", "(intervals[i][1], -1)) cur_n, max_n = 0, 0 for t, counter in time_counters: cur_n", "intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals = [[7,", "# Add next end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class", "Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type", "time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei), find", "after min end time, remove min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq)", "conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need", "Use min heap to store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for", "room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int", "= 0, 0 for t, counter in time_counters: cur_n += counter max_n =", ":rtype: int Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq if not", "if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to min heap.", "minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n). Space complexity:", "import heapq if not intervals or not intervals[0]: return 0 # Sort intervals", "return 0 # Sort intervals by start time. 
intervals.sort() # Use min heap", "the minimum number of conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output:", "if not intervals or not intervals[0]: return 0 # Sort intervals by start", "intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype:", "O(n*logn). Space complexity: O(n). \"\"\" import heapq if not intervals or not intervals[0]:", "= [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) if __name__ == '__main__':", "end time, remove min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add", "0 for t, counter in time_counters: cur_n += counter max_n = max(max_n, cur_n)", "+= counter max_n = max(max_n, cur_n) return max_n def main(): # Output: 2.", "counter max_n = max(max_n, cur_n) return max_n def main(): # Output: 2. intervals", "SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals = [[7, 10], [2, 4]] print", "times [[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required.", "insort # Sort times and add increment/decrement counters by start/end. time_counters = []", "intervals.sort() # Use min heap to store end times. end_minhq = [] heapq.heappush(end_minhq,", "def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n). 
Space", "SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn).", "intervals[0][1]) for i in range(1, len(intervals)): # If next start time is after", "[[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) if __name__ == '__main__': main()", "time, remove min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next", "meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10], [2,", "end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): # If next", "len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time", "10], [2, 4]] Output: 1 Explanation: Only need one meeting room \"\"\" class", "min end time, remove min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) #", "# If next start time is after min end time, remove min end", "cur_n += counter max_n = max(max_n, cur_n) return max_n def main(): # Output:", "List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq if", "to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\"", "room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10], [2, 4]] Output: 1 Explanation:", "SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. 
intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print", "Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting rooms", "Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting of start", "in range(1, len(intervals)): # If next start time is after min end time,", "for i in range(1, len(intervals)): # If next start time is after min", "O(n). \"\"\" from bisect import insort # Sort times and add increment/decrement counters", "by start/end. time_counters = [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters,", "not intervals[0]: return 0 # Sort intervals by start time. intervals.sort() # Use", "[] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n", "end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return", "Output: 1 Explanation: Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self,", "add increment/decrement counters by start/end. time_counters = [] for i in range(len(intervals)): insort(time_counters,", "number of conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation:", "intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity: O(n).", "def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) #", "int Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq if not intervals", "intervals: List[List[int]] :rtype: int Time complexity: O(n). Space complexity: O(n). \"\"\" from bisect", "consisting of start and end times [[s1,e1],[s2,e2],...] 
(si < ei), find the minimum", "i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0,", "intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting rooms room1: (0,30)", "remove min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end", "[[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals = [[7, 10], [2,", "minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity:", "to store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i in range(1,", "range(1, len(intervals)): # If next start time is after min end time, remove", "\"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n). Space complexity: O(n). \"\"\"", "import insort # Sort times and add increment/decrement counters by start/end. time_counters =", "start time. intervals.sort() # Use min heap to store end times. end_minhq =", "range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0 for", "rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10], [2, 4]]", "complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq if not intervals or not", "max_n = max(max_n, cur_n) return max_n def main(): # Output: 2. intervals =", "[[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting rooms room1: (0,30) room2: (5,10),(15,20)", "# Use min heap to store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1])", "insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0 for t, counter in time_counters:", "1. 
intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) if __name__", "min heap to store end times. end_minhq = [] heapq.heappush(end_minhq, intervals[0][1]) for i", "room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10], [2, 4]] Output:", "We need two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals =", "intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq", "intervals[0]: return 0 # Sort intervals by start time. intervals.sort() # Use min", "of conference rooms required. Example1 Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We", "heapq.heappush(end_minhq, intervals[0][1]) for i in range(1, len(intervals)): # If next start time is", "intervals or not intervals[0]: return 0 # Sort intervals by start time. intervals.sort()", "[[7, 10], [2, 4]] Output: 1 Explanation: Only need one meeting room \"\"\"", "Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals", "or not intervals[0]: return 0 # Sort intervals by start time. intervals.sort() #", "max_n def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals)", ":type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import", "# Sort times and add increment/decrement counters by start/end. time_counters = [] for", "II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting", "increment/decrement counters by start/end. 
time_counters = [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0],", "Input: intervals = [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting rooms room1:", "List[List[int]] :rtype: int Time complexity: O(n). Space complexity: O(n). \"\"\" from bisect import", "start/end. time_counters = [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1],", "\"\"\"Leetcode 253. Meeting Rooms II (Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of", "4]] Output: 1 Explanation: Only need one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def", "heapq if not intervals or not intervals[0]: return 0 # Sort intervals by", "heapq.heappop(end_minhq) # Add next end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq)", "need two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7,", "intervals = [[7, 10], [2, 4]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) if __name__ ==", "cur_n, max_n = 0, 0 for t, counter in time_counters: cur_n += counter", "one meeting room \"\"\" class SolutionSortEndMinHeapEnd(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]]", "next end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def", "= [[0,30],[5,10],[15,20]] Output: 2 Explanation: We need two meeting rooms room1: (0,30) room2:", "max_n = 0, 0 for t, counter in time_counters: cur_n += counter max_n", "in time_counters: cur_n += counter max_n = max(max_n, cur_n) return max_n def main():", "Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. 
intervals", "intervals by start time. intervals.sort() # Use min heap to store end times.", "2 Explanation: We need two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input:", "# Sort intervals by start time. intervals.sort() # Use min heap to store", "min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type", "Time complexity: O(n). Space complexity: O(n). \"\"\" from bisect import insort # Sort", "insort(time_counters, (intervals[i][0], 1)) insort(time_counters, (intervals[i][1], -1)) cur_n, max_n = 0, 0 for t,", "= [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output: 1. intervals = [[7, 10],", "(5,10),(15,20) Example2 Input: intervals = [[7, 10], [2, 4]] Output: 1 Explanation: Only", "an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...]", "two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10],", "heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals: List[List[int]]", "\"\"\" :type intervals: List[List[int]] :rtype: int Time complexity: O(n*logn). Space complexity: O(n). \"\"\"", "next start time is after min end time, remove min end time. if", "heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self, intervals): \"\"\" :type intervals:", "end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object): def minMeetingRooms(self,", "[[s1,e1],[s2,e2],...] (si < ei), find the minimum number of conference rooms required. 
Example1", "Input: intervals = [[7, 10], [2, 4]] Output: 1 Explanation: Only need one", "< ei), find the minimum number of conference rooms required. Example1 Input: intervals", "Sort intervals by start time. intervals.sort() # Use min heap to store end", "complexity: O(n). \"\"\" import heapq if not intervals or not intervals[0]: return 0", "(si < ei), find the minimum number of conference rooms required. Example1 Input:", "https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting of start and end", "(0,30) room2: (5,10),(15,20) Example2 Input: intervals = [[7, 10], [2, 4]] Output: 1", "\"\"\" import heapq if not intervals or not intervals[0]: return 0 # Sort", "and add increment/decrement counters by start/end. time_counters = [] for i in range(len(intervals)):", "end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time to", "max(max_n, cur_n) return max_n def main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print", "Output: 2 Explanation: We need two meeting rooms room1: (0,30) room2: (5,10),(15,20) Example2", "Given an array of meeting time intervals consisting of start and end times", "main(): # Output: 2. intervals = [[0,30],[5,10],[15,20]] print SolutionSortEndMinHeapEnd().minMeetingRooms(intervals) print SolutionTimeCounterListInsort().minMeetingRooms(intervals) # Output:", "min end time. if intervals[i][0] >= end_minhq[0]: heapq.heappop(end_minhq) # Add next end time", "(Premium) Medium URL: https://leetcode.com/problems/meeting-rooms-ii Given an array of meeting time intervals consisting of", "counters by start/end. time_counters = [] for i in range(len(intervals)): insort(time_counters, (intervals[i][0], 1))", "Add next end time to min heap. heapq.heappush(end_minhq, intervals[i][1]) return len(end_minhq) class SolutionTimeCounterListInsort(object):", "Time complexity: O(n*logn). Space complexity: O(n). \"\"\" import heapq if not intervals or" ]
[ "flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\",", "bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\",", "backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity =", "lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first,", "lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating", "if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection =", "location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method =", "as bw import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME =", "collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05)", ") backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) 
backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\",", ") backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first, method=method, amount=42) backend.CharacterizationFactor.create(flow=second, method=method,", "def basic_fixture(): NAME = \"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME,", "= backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\",", "reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow", "unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world =", "bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first =", "pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if NAME in bw.projects: #", "backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection", "collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, 
amount=0.15)", "= backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada =", "import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" #", "main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow,", "activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main", "backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if NAME in", "kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", )", "NAME = \"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection", "flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity", "bw import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\"", "basic_fixture(): NAME = \"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True)", "= backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5", "bw.projects.delete_project(NAME) 
bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an", "name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow,", "backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world,", "name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\")", "= backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\",", "direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first, method=method, amount=42)", "lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\",", "amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\",", "import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if NAME in bw.projects:", "amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, 
) dinner_activity =", ") lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow,", ") backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create(", "name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity,", "= backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second =", "activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first,", "\"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\")", "unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, )", "<reponame>brightway-lca/bw_default_backend<filename>tests/fixtures/create_fixtures.py<gh_stars>0 import bw_projects as bw import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def", "in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = 
backend.Collection.create(name=\"food\") first", "backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, )", "dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada,", "kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow =", ") second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\")", "emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\",", "= backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\",", "first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another", "backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first, method=method, amount=42) backend.CharacterizationFactor.create(flow=second, method=method, amount=99)", "backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create(", "collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = 
backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create(", "emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow", "food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second", "food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada,", "dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\",", "backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\",", "kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create(", "bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if", "import bw_projects as bw import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture():", "backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 )", "as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if NAME", 
"= backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity", "= backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating", "add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\",", "collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world", ") backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection,", "direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\",", "backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" )", "unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, )", "kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create(", "= backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an 
emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\"", "name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity,", "name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\" ) second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection,", "reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method", "canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection )", "world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\",", "# bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create(", "backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection,", "backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow = backend.Flow.create( name=\"dinner", "# if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = 
backend.Collection.create(name=\"biosphere\") food_collection", "name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\", unit=\"kg\", kind=\"technosphere\", collection=food_collection ) lunch_activity = backend.Activity.create(", "collection=food_collection ) lunch_activity = backend.Activity.create( name=\"eating lunch\", collection=food_collection, reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity,", "backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\",", "location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 ) backend.Exchange.create(activity=lunch_activity, flow=first, amount=0.05) dinner_flow =", "dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second,", ") world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch food\",", "= \"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection =", "NAME in bw.projects: # bw.projects.delete_project(NAME) bw.projects.create_project(NAME, add_base_data=True) biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\")", "second = backend.Flow.create( name=\"another emission\", kind=\"biosphere\", collection=biosphere_collection, unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada", "backend.Activity.create( name=\"eating lunch\", collection=food_collection, 
reference_product=lunch_flow, location=canada, ) backend.Exchange.create( activity=lunch_activity, flow=lunch_flow, direction=\"production\", amount=0.5 )", "dinner_flow = backend.Flow.create( name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create(", ") dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow,", "biosphere_collection = backend.Collection.create(name=\"biosphere\") food_collection = backend.Collection.create(name=\"food\") first = backend.Flow.create( name=\"an emission\", kind=\"biosphere\", collection=biosphere_collection,", "bw_projects as bw import bw_default_backend as backend import pytest @pytest.fixture(scope=\"function\") def basic_fixture(): NAME", "collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity,", "unit=\"kg\", ) world = backend.Geocollection.get(name=\"world\") canada = backend.Location.create(geocollection=world, name=\"Canada\") lunch_flow = backend.Flow.create( name=\"lunch", "= backend.Activity.create( name=\"eating dinner\", collection=food_collection, reference_product=dinner_flow, location=canada, ) backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25", "backend.Exchange.create( activity=dinner_activity, flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\"))", "name=\"dinner main dish\", unit=\"kg\", kind=\"technosphere\", collection=food_collection, ) dinner_activity = backend.Activity.create( name=\"eating dinner\", 
collection=food_collection,", "flow=dinner_flow, direction=\"production\", amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first, method=method,", "amount=0.25 ) backend.Exchange.create(activity=dinner_activity, flow=second, amount=0.15) method = backend.Method.create(name=(\"test\", \"method\")) backend.CharacterizationFactor.create(flow=first, method=method, amount=42) backend.CharacterizationFactor.create(flow=second,", "@pytest.fixture(scope=\"function\") def basic_fixture(): NAME = \"test-fixtures\" # if NAME in bw.projects: # bw.projects.delete_project(NAME)" ]
[ ". import ndarray try: from .gen_contrib import * except ImportError: pass __all__ =", "- 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) /", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "classes. num_sampled: int The number of classes to randomly sample. range_max: int The", "avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64", "check_input(data, ndarray.NDArray, \"data should be an NDArray or a list of NDArrays\") check_input(init_states,", "<NDArray 4 @cpu(0)> \"\"\" if ctx is None: ctx = current_context() log_range =", "= isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] states = init_states", "foreach(body, data, init_states): \"\"\"Run a for loop with user-defined computation over NDArrays on", "sampled classes to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled =", "+ 1.0)).log() / log_range * num_sampled # cast sampled classes to fp64 to", "def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an approximately log-uniform or", "License for the # specific language governing permissions and limitations # under the", "are in the range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') %", "ctx=ctx) # make sure sampled_classes are in the range of [0, range_max) sampled_classes", "make sure sampled_classes are in the range of [0, range_max) sampled_classes = (rand.exp()", "= [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and", "4, 5) >>> samples [1 3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true", "range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls + 
1.0)).log()", "(ASF) under one # or more contributor license agreements. See the NOTICE file", "check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list): for i in inputs:", "[] for i in range(num_iters): if not_data_list: eles = data[i] else: eles =", "iteration. Examples -------- >>> step = lambda data, states: (data + states[0], [states[0]", "body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for", "have the same size as init_states. Similarly, out can be either an NDArray", "i in data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out) outs", "input data is NDArray: states = init_states outs = [] for i in", "permissions and limitations # under the License. # coding: utf-8 # pylint: disable=wildcard-import,", "not_data_list: eles = data[i] else: eles = [d[i] for d in data] outs,", "operator. Returns ------- outputs: an NDArray or a list of NDArrays. The output", "isinstance(inputs, list): for i in inputs: if not isinstance(i, in_type): is_NDArray_or_list = False", "software distributed under the License is distributed on an # \"AS IS\" BASIS,", "a for loop and body has the computation for an iteration of the", "has the computation for an iteration of the for loop. It runs the", "to randomly sample. range_max: int The number of possible classes. ctx : Context", "in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an NDArray or a", "range(num_iters): if not_data_list: eles = data[i] else: eles = [d[i] for d in", "log_range = math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) #", "the same size as data. states is a list of NDArrays and have", "outputs = [] for i in range(num_iters): if not_data_list: eles = data[i] else:", "be either an NDArray or a list of NDArrays, which are concatenated as", "NDArrays and has the same size as data. 
states is a list of", "states in the last iteration. Examples -------- >>> step = lambda data, states:", "slice from the input NDArrays. body takes two arguments as input and outputs", "[] for i in data.shape[0]: s = data[i] out, states = body(s, states)", "expected_count_sample: NDArray The expected count for sampled candidates in 1-D `float64` dtype. Examples", "list of NDArrays. If data is an NDArray, data1 is an NDArray. Otherwise,", "states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a", "a list of NDArrays and has the same size as data. states is", "data. init_states: an NDArray or a list of NDArrays. The initial values of", "dtype='float64', ctx=ctx) # make sure sampled_classes are in the range of [0, range_max)", "pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for loop with user-defined computation", "in data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out) outs =", "a list of NDArrays. The output data concatenated from the output of all", "[d[i] for d in data] outs, states = body(eles, states) outs = _as_list(outs)", "true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples", "False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data", "dtype. expected_count_sample: NDArray The expected count for sampled candidates in 1-D `float64` dtype.", "data: an NDArray or a list of NDArrays. The input data. init_states: an", "utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math from", "This operation randomly samples *num_sampled* candidates the range of integers [0, range_max). The", "expected count for sampled candidates in 1-D `float64` dtype. Examples -------- >>> true_cls", "is a list of NDArrays and have the same size as init_states. 
Similarly,", "the same size as init_states. Similarly, out can be either an NDArray or", "of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or a list of", "under the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "context of output. Default is current context. Returns ------- samples: NDArray The sampled", "additional information # regarding copyright ownership. The ASF licenses this file # to", "target classes. num_sampled: int The number of classes to randomly sample. range_max: int", "# \"License\"); you may not use this file except in compliance # with", "an NDArray or a list of NDArrays, which are concatenated as the first", "returns the number of times each of the \\ true classes and the", "init_states: an NDArray or a list of NDArrays. The initial values of the", "Licensed to the Apache Software Foundation (ASF) under one # or more contributor", "classes and the sampled classes is expected to occur. Parameters ---------- true_classes :", "= init_states outputs = [] for i in range(num_iters): if not_data_list: eles =", "iteration. data: an NDArray or a list of NDArrays. The input data. init_states:", "or more contributor license agreements. See the NOTICE file # distributed with this", "a lexicon sorted in decreasing order of \\ frequency. If your classes are", "it also returns the number of times each of the \\ true classes", "1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled #", "are the second output of foreach. The computation done by this operator is", "OR CONDITIONS OF ANY # KIND, either express or implied. See the License", "Foundation (ASF) under one # or more contributor license agreements. See the NOTICE", "NDArray or a list of NDArrays. The output data concatenated from the output", "with replacement from the base distribution. The base distribution for this operator is", "License. 
# coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\"", "The number of possible classes. ctx : Context Device context of output. Default", "Apache Software Foundation (ASF) under one # or more contributor license agreements. See", "init_states. Similarly, out can be either an NDArray or a list of NDArrays,", "in data] outs, states = body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs =", "uniform from ..base import _as_list from . import ndarray try: from .gen_contrib import", "sure sampled_classes are in the range of [0, range_max) sampled_classes = (rand.exp() -", "+ 1) This sampler is useful when the true classes approximately follow such", "data concatenated from the output of all iterations. states: a list of NDArrays.", "* 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs,", "break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should", "for d in data] outs, states = body(eles, states) outs = _as_list(outs) outputs.append(outs)", "randomly sample. range_max: int The number of possible classes. ctx : Context Device", "states is a list of NDArrays and have the same size as init_states.", "samples [1 3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray", "unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math from ..context import current_context from", "classes are not ordered by decreasing frequency, do not use this op. Additionaly,", "= sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range", "body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python function.", "are drawn with replacement from the base distribution. The base distribution for this", "in compliance # with the License. 
You may obtain a copy of the", "approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2) - log(class +", "implied. See the License for the # specific language governing permissions and limitations", "or agreed to in writing, # software distributed under the License is distributed", "or a list of NDArrays, which are concatenated as the first output of", "= mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True if", "def foreach(body, data, init_states): \"\"\"Run a for loop with user-defined computation over NDArrays", "data1 is a list of NDArrays and has the same size as data.", "randomly samples *num_sampled* candidates the range of integers [0, range_max). The elements of", "eles = [d[i] for d in data] outs, states = body(eles, states) outs", "\"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list): for i", "in body on each slice from the input NDArrays. body takes two arguments", "list): for i in inputs: if not isinstance(i, in_type): is_NDArray_or_list = False break", "represent words in a lexicon sorted in decreasing order of \\ frequency. If", "is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an", "license agreements. 
See the NOTICE file # distributed with this work for additional", "<NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [", "= True if isinstance(inputs, list): for i in inputs: if not isinstance(i, in_type):", "outputs = zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs =", "((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled # cast", "\"License\"); you may not use this file except in compliance # with the", "/ log(range_max + 1) This sampler is useful when the true classes approximately", "true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range *", "NDArrays. The output data concatenated from the output of all iterations. states: a", "of NDArrays and has the same size as data. states is a list", "Python function. Define computation in an iteration. data: an NDArray or a list", "classes in 1-D `int64` dtype. expected_count_true: NDArray The expected count for true classes", "0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is None: ctx = current_context()", "the License. # coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of", "ordered by decreasing frequency, do not use this op. Additionaly, it also returns", "of NDArrays. If data is an NDArray, data1 is an NDArray. Otherwise, data1", "input data. init_states: an NDArray or a list of NDArrays. The initial values", "frequency, do not use this op. Additionaly, it also returns the number of", "language governing permissions and limitations # under the License. # coding: utf-8 #", "either express or implied. 
See the License for the # specific language governing", "= data.shape[0] if not_data_list else data[0].shape[0] states = init_states outputs = [] for", "data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs)", "isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an NDArray or", "should be an NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should", ">>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states =", "the base distribution. The base distribution for this operator is an approximately log-uniform", "\"\"\"Draw random samples from an approximately log-uniform or Zipfian distribution. This operation randomly", "iterations. states: a list of NDArrays. The loop states in the last iteration.", "NDArray or a list of NDArrays. The initial values of the loop states.", "of foreach; states from the last execution of body are the second output", "decreasing frequency, do not use this op. Additionaly, it also returns the number", "not use this file except in compliance # with the License. You may", "..context import current_context from ..random import uniform from ..base import _as_list from .", "dtype. expected_count_true: NDArray The expected count for true classes in 1-D `float64` dtype.", "for i in inputs: if not isinstance(i, in_type): is_NDArray_or_list = False break else:", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "# or more contributor license agreements. See the NOTICE file # distributed with", "2) - log(class + 1)) / log(range_max + 1) This sampler is useful", "useful when the true classes approximately follow such a distribution. For example, if", "is current context. Returns ------- samples: NDArray The sampled candidate classes in 1-D", "sampled candidate classes in 1-D `int64` dtype. 
expected_count_true: NDArray The expected count for", "string. The name of the operator. Returns ------- outputs: an NDArray or a", ".gen_contrib import * except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def", "2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the", "/ log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint:", "= lambda data, states: (data + states[0], [states[0] * 2]) >>> data =", "data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has", "a list of NDArrays, which are concatenated as the first output of foreach;", "log-uniform or Zipfian distribution: P(class) = (log(class + 2) - log(class + 1))", ">>> step = lambda data, states: (data + states[0], [states[0] * 2]) >>>", "of the target classes. num_sampled: int The number of classes to randomly sample.", "a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list", "integers [0, range_max). The elements of sampled_candidates are drawn with replacement from the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "data.shape[0] if not_data_list else data[0].shape[0] states = init_states outputs = [] for i", "ctx=None): \"\"\"Draw random samples from an approximately log-uniform or Zipfian distribution. This operation", "(data + states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states", "concatenated as the first output of foreach; states from the last execution of", "data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list):", "int The number of possible classes. 
ctx : Context Device context of output.", "exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is", "= body(data1, states) data1 can be either an NDArray or a list of", "equivalent to the pseudo code below when the input data is NDArray: states", "2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return", "for true classes in 1-D `float64` dtype. expected_count_sample: NDArray The expected count for", "= data[i] else: eles = [d[i] for d in data] outs, states =", "pseudo code below when the input data is NDArray: states = init_states outs", "# regarding copyright ownership. The ASF licenses this file # to you under", "an NDArray. Otherwise, data1 is a list of NDArrays and has the same", "range_max, ctx=None): \"\"\"Draw random samples from an approximately log-uniform or Zipfian distribution. This", "output data concatenated from the output of all iterations. states: a list of", "more contributor license agreements. See the NOTICE file # distributed with this work", ">>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx", "expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled", "2.0) / (true_cls + 1.0)).log() / log_range * num_sampled # cast sampled classes", "of all iterations. states: a list of NDArrays. The loop states in the", "states = init_states outputs = [] for i in range(num_iters): if not_data_list: eles", "cast sampled classes to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled", "for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs) ==", "int The number of classes to randomly sample. range_max: int The number of", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "import _as_list from . 
import ndarray try: from .gen_contrib import * except ImportError:", "the output of all iterations. states: a list of NDArrays. The loop states", "-------- >>> step = lambda data, states: (data + states[0], [states[0] * 2])", "this operator is an approximately log-uniform or Zipfian distribution: P(class) = (log(class +", "the true classes approximately follow such a distribution. For example, if the classes", "loop. It runs the computation in body on each slice from the input", "list of NDArrays. The input data. init_states: an NDArray or a list of", "input NDArrays. body takes two arguments as input and outputs a tuple of", ">>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def", "a distribution. For example, if the classes represent words in a lexicon sorted", "in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list): for i in inputs: if", "sampler is useful when the true classes approximately follow such a distribution. For", "expected count for true classes in 1-D `float64` dtype. expected_count_sample: NDArray The expected", "The loop states in the last iteration. Examples -------- >>> step = lambda", "mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs,", "if isinstance(inputs, list): for i in inputs: if not isinstance(i, in_type): is_NDArray_or_list =", "from the output of all iterations. states: a list of NDArrays. The loop", "CONDITIONS OF ANY # KIND, either express or implied. See the License for", "states: a list of NDArrays. The loop states in the last iteration. Examples", "base distribution. 
The base distribution for this operator is an approximately log-uniform or", "= mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1", "not isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert", "work for additional information # regarding copyright ownership. The ASF licenses this file", "times each of the \\ true classes and the sampled classes is expected", "below when the input data is NDArray: states = init_states outs = []", "= expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body,", "+ 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled", "or a list of NDArrays. If data is an NDArray, data1 is an", "Context Device context of output. Default is current context. Returns ------- samples: NDArray", "for this operator is an approximately log-uniform or Zipfian distribution: P(class) = (log(class", "expected to occur. Parameters ---------- true_classes : NDArray A 1-D NDArray of the", "states = body(data1, states) data1 can be either an NDArray or a list", "size as data. states is a list of NDArrays and have the same", ": a Python function. Define computation in an iteration. data: an NDArray or", "rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are in", "licenses this file # to you under the Apache License, Version 2.0 (the", "\\ frequency. 
If your classes are not ordered by decreasing frequency, do not", "math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure", "two elements, as illustrated below: out, states = body(data1, states) data1 can be", "Examples -------- >>> step = lambda data, states: (data + states[0], [states[0] *", "below: out, states = body(data1, states) data1 can be either an NDArray or", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an approximately log-uniform", "True if isinstance(inputs, list): for i in inputs: if not isinstance(i, in_type): is_NDArray_or_list", "in decreasing order of \\ frequency. If your classes are not ordered by", "------- outputs: an NDArray or a list of NDArrays. The output data concatenated", "NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray", "+ 1)) / log(range_max + 1) This sampler is useful when the true", "else: eles = [d[i] for d in data] outs, states = body(eles, states)", "express or implied. See the License for the # specific language governing permissions", "= [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type,", "0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is None: ctx = current_context() log_range", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "from ..random import uniform from ..base import _as_list from . import ndarray try:", "computation in an iteration. data: an NDArray or a list of NDArrays. The", "on each slice from the input NDArrays. body takes two arguments as input", "operation randomly samples *num_sampled* candidates the range of integers [0, range_max). The elements", "count for sampled candidates in 1-D `float64` dtype. 
Examples -------- >>> true_cls =", "from . import ndarray try: from .gen_contrib import * except ImportError: pass __all__", "of integers [0, range_max). The elements of sampled_candidates are drawn with replacement from", "of NDArrays. The output data concatenated from the output of all iterations. states:", "runs the computation in body on each slice from the input NDArrays. body", "you under the Apache License, Version 2.0 (the # \"License\"); you may not", "1.0)).log() / log_range * num_sampled # cast sampled classes to fp64 to avoid", "NDArrays. body takes two arguments as input and outputs a tuple of two", "/ (true_cls + 1.0)).log() / log_range * num_sampled # cast sampled classes to", "= isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an NDArray", "= ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled #", "= tmp_outputs if not_data_list and len(outputs) == 1: outputs = outputs[0] return (outputs,", "License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "by decreasing frequency, do not use this op. Additionaly, it also returns the", "[1 3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1", "tmp_outputs if not_data_list and len(outputs) == 1: outputs = outputs[0] return (outputs, states)", "classes. ctx : Context Device context of output. Default is current context. Returns", "each of the \\ true classes and the sampled classes is expected to", "((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled *", "for loop with user-defined computation over NDArrays on dimension 0. This operator simulates", "a for loop with user-defined computation over NDArrays on dimension 0. This operator", "1-D NDArray of the target classes. num_sampled: int The number of classes to", "has the same size as data. 
states is a list of NDArrays and", "concatenated from the output of all iterations. states: a list of NDArrays. The", "step = lambda data, states: (data + states[0], [states[0] * 2]) >>> data", "1) This sampler is useful when the true classes approximately follow such a", "NDArray of the target classes. num_sampled: int The number of classes to randomly", "<NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)>", "stack(*outs) Parameters ---------- body : a Python function. Define computation in an iteration.", "msg): is_NDArray_or_list = True if isinstance(inputs, list): for i in inputs: if not", "ctx is None: ctx = current_context() log_range = math.log(range_max + 1) rand =", "1-D `float64` dtype. Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample", "import * except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes,", "data1 can be either an NDArray or a list of NDArrays. If data", "1)) / log(range_max + 1) This sampler is useful when the true classes", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "NDArrays. The loop states in the last iteration. Examples -------- >>> step =", "init_states outputs = [] for i in range(num_iters): if not_data_list: eles = data[i]", "\\ true classes and the sampled classes is expected to occur. Parameters ----------", "under the Apache License, Version 2.0 (the # \"License\"); you may not use", "of two elements, as illustrated below: out, states = body(data1, states) data1 can", "the second output of foreach. The computation done by this operator is equivalent", "of the \\ true classes and the sampled classes is expected to occur.", "Define computation in an iteration. data: an NDArray or a list of NDArrays.", "License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "sorted in decreasing order of \\ frequency. If your classes are not ordered", "tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs) == 1: outputs = outputs[0]", "[ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray", "distribution for this operator is an approximately log-uniform or Zipfian distribution: P(class) =", "msg check_input(data, ndarray.NDArray, \"data should be an NDArray or a list of NDArrays\")", "[ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is None:", "or a list of NDArrays. The initial values of the loop states. name:", "1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are", "or a list of NDArrays. The output data concatenated from the output of", "or implied. See the License for the # specific language governing permissions and", "1-D `int64` dtype. expected_count_true: NDArray The expected count for true classes in 1-D", "outputs: an NDArray or a list of NDArrays. The output data concatenated from", "count for true classes in 1-D `float64` dtype. expected_count_sample: NDArray The expected count", "same size as init_states. Similarly, out can be either an NDArray or a", "i in inputs: if not isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list", "data, init_states): \"\"\"Run a for loop with user-defined computation over NDArrays on dimension", "The base distribution for this operator is an approximately log-uniform or Zipfian distribution:", "ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] states = init_states outputs =", "with user-defined computation over NDArrays on dimension 0. This operator simulates a for", "an NDArray or a list of NDArrays. 
The output data concatenated from the", "distributed under the License is distributed on an # \"AS IS\" BASIS, WITHOUT", "`int64` dtype. expected_count_true: NDArray The expected count for true classes in 1-D `float64`", "data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step,", "The computation done by this operator is equivalent to the pseudo code below", "computation for an iteration of the for loop. It runs the computation in", "\"init_states should be an NDArray or a list of NDArrays\") not_data_list = isinstance(data,", "as init_states. Similarly, out can be either an NDArray or a list of", "NDArray The sampled candidate classes in 1-D `int64` dtype. expected_count_true: NDArray The expected", "sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() /", "expected_count_true: NDArray The expected count for true classes in 1-D `float64` dtype. expected_count_sample:", "distribution: P(class) = (log(class + 2) - log(class + 1)) / log(range_max +", "are not ordered by decreasing frequency, do not use this op. Additionaly, it", "NDArray The expected count for sampled candidates in 1-D `float64` dtype. Examples --------", "of output. Default is current context. Returns ------- samples: NDArray The sampled candidate", "be an NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be", "frequency. If your classes are not ordered by decreasing frequency, do not use", "# under the License. # coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray", "# pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math from ..context", "when the true classes approximately follow such a distribution. 
For example, if the", "range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls =", "the range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls", "body has the computation for an iteration of the for loop. It runs", "illustrated below: out, states = body(data1, states) data1 can be either an NDArray", "either an NDArray or a list of NDArrays, which are concatenated as the", "data] outs, states = body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs)", "A 1-D NDArray of the target classes. num_sampled: int The number of classes", "The elements of sampled_candidates are drawn with replacement from the base distribution. The", "s = data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters", "if not isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs, in_type)", "= body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python", "@cpu(0)> \"\"\" if ctx is None: ctx = current_context() log_range = math.log(range_max +", "10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\"", "of MXNet.\"\"\" import math from ..context import current_context from ..random import uniform from", "a list of NDArrays. If data is an NDArray, data1 is an NDArray.", "states: (data + states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>>", "_as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out))", "Unless required by applicable law or agreed to in writing, # software distributed", "each slice from the input NDArrays. 
body takes two arguments as input and", "= true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range", "values of the loop states. name: string. The name of the operator. Returns", "NDArray, data1 is an NDArray. Otherwise, data1 is a list of NDArrays and", "your classes are not ordered by decreasing frequency, do not use this op.", "or a list of NDArrays. The input data. init_states: an NDArray or a", "distributed with this work for additional information # regarding copyright ownership. The ASF", "states) outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python function. Define", "of sampled_candidates are drawn with replacement from the base distribution. The base distribution", "`float64` dtype. expected_count_sample: NDArray The expected count for sampled candidates in 1-D `float64`", "number of times each of the \\ true classes and the sampled classes", "for i in range(num_iters): if not_data_list: eles = data[i] else: eles = [d[i]", "import math from ..context import current_context from ..random import uniform from ..base import", "pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an approximately", "regarding copyright ownership. The ASF licenses this file # to you under the", "exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879]", "_as_list from . import ndarray try: from .gen_contrib import * except ImportError: pass", "elements of sampled_candidates are drawn with replacement from the base distribution. 
The base", "data, states: (data + states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10))", ">>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3", "outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list =", "# KIND, either express or implied. See the License for the # specific", "a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or a", "initial values of the loop states. name: string. The name of the operator.", "and has the same size as data. states is a list of NDArrays", "this work for additional information # regarding copyright ownership. The ASF licenses this", "ANY # KIND, either express or implied. See the License for the #", "the \\ true classes and the sampled classes is expected to occur. Parameters", "eles = data[i] else: eles = [d[i] for d in data] outs, states", "= zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs", "P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)", "contributor license agreements. See the NOTICE file # distributed with this work for", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", ">>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>>", "MXNet.\"\"\" import math from ..context import current_context from ..random import uniform from ..base", "range of integers [0, range_max). 
The elements of sampled_candidates are drawn with replacement", "% range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls +", "outs = [] for i in data.shape[0]: s = data[i] out, states =", "NDArray API of MXNet.\"\"\" import math from ..context import current_context from ..random import", "exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3 3] <NDArray", "---------- true_classes : NDArray A 1-D NDArray of the target classes. num_sampled: int", "and have the same size as init_states. Similarly, out can be either an", "expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for loop", "Additionaly, it also returns the number of times each of the \\ true", "code below when the input data is NDArray: states = init_states outs =", "= init_states outs = [] for i in data.shape[0]: s = data[i] out,", "candidate classes in 1-D `int64` dtype. expected_count_true: NDArray The expected count for true", "is NDArray: states = init_states outs = [] for i in data.shape[0]: s", "The input data. init_states: an NDArray or a list of NDArrays. The initial", "the pseudo code below when the input data is NDArray: states = init_states", "in an iteration. data: an NDArray or a list of NDArrays. The input", "See the License for the # specific language governing permissions and limitations #", "list of NDArrays. The output data concatenated from the output of all iterations.", "range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true =", "expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data,", "The expected count for sampled candidates in 1-D `float64` dtype. 
Examples -------- >>>", "mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3", "the for loop. It runs the computation in body on each slice from", "be an NDArray or a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "should be an NDArray or a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray)", "2.0 (the # \"License\"); you may not use this file except in compliance", "0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is None: ctx =", "a list of NDArrays and have the same size as init_states. Similarly, out", "in 1-D `int64` dtype. expected_count_true: NDArray The expected count for true classes in", "i in range(num_iters): if not_data_list: eles = data[i] else: eles = [d[i] for", "of NDArrays, which are concatenated as the first output of foreach; states from", "The expected count for true classes in 1-D `float64` dtype. expected_count_sample: NDArray The", "size as init_states. Similarly, out can be either an NDArray or a list", "\"data should be an NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states", "KIND, either express or implied. See the License for the # specific language", "import ndarray try: from .gen_contrib import * except ImportError: pass __all__ = [\"rand_zipfian\"]", "the sampled classes is expected to occur. Parameters ---------- true_classes : NDArray A", "of times each of the \\ true classes and the sampled classes is", "for an iteration of the for loop. It runs the computation in body", "log(class + 1)) / log(range_max + 1) This sampler is useful when the", "The sampled candidate classes in 1-D `int64` dtype. expected_count_true: NDArray The expected count", "of the loop states. name: string. The name of the operator. 
Returns -------", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4", "limitations # under the License. # coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib", "distribution. The base distribution for this operator is an approximately log-uniform or Zipfian", "order of \\ frequency. If your classes are not ordered by decreasing frequency,", "states from the last execution of body are the second output of foreach.", "num_iters = data.shape[0] if not_data_list else data[0].shape[0] states = init_states outputs = []", "in the last iteration. Examples -------- >>> step = lambda data, states: (data", "outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs", "arguments as input and outputs a tuple of two elements, as illustrated below:", "data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body", "NDArrays. The input data. init_states: an NDArray or a list of NDArrays. The", "such a distribution. For example, if the classes represent words in a lexicon", "decreasing order of \\ frequency. If your classes are not ordered by decreasing", "compliance # with the License. You may obtain a copy of the License", "of \\ frequency. If your classes are not ordered by decreasing frequency, do", "0. This operator simulates a for loop and body has the computation for", "by this operator is equivalent to the pseudo code below when the input", "body : a Python function. Define computation in an iteration. 
data: an NDArray", "rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an approximately log-uniform or Zipfian", "If your classes are not ordered by decreasing frequency, do not use this", "@cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879", "= False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray,", "do not use this op. Additionaly, it also returns the number of times", "if ctx is None: ctx = current_context() log_range = math.log(range_max + 1) rand", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See", "with the License. You may obtain a copy of the License at #", "+ 2) - log(class + 1)) / log(range_max + 1) This sampler is", "information # regarding copyright ownership. The ASF licenses this file # to you", "states = init_states outs = [] for i in data.shape[0]: s = data[i]", "+ states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states =", "Similarly, out can be either an NDArray or a list of NDArrays, which", "[\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from", "this op. Additionaly, it also returns the number of times each of the", "random samples from an approximately log-uniform or Zipfian distribution. This operation randomly samples", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "log_range * num_sampled # cast sampled classes to fp64 to avoid interget division", "NDArray or a list of NDArrays, which are concatenated as the first output", "one # or more contributor license agreements. See the NOTICE file # distributed", "[0, range_max). 
The elements of sampled_candidates are drawn with replacement from the base", "- log(class + 1)) / log(range_max + 1) This sampler is useful when", "exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3 3] <NDArray 4", "= current_context() log_range = math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64',", "except in compliance # with the License. You may obtain a copy of", "a tuple of two elements, as illustrated below: out, states = body(data1, states)", ">>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list", "not_data_list else data[0].shape[0] states = init_states outputs = [] for i in range(num_iters):", "if the classes represent words in a lexicon sorted in decreasing order of", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "you may not use this file except in compliance # with the License.", "an NDArray or a list of NDArrays. If data is an NDArray, data1", "distribution. This operation randomly samples *num_sampled* candidates the range of integers [0, range_max).", "list of NDArrays, which are concatenated as the first output of foreach; states", "outputs = tmp_outputs if not_data_list and len(outputs) == 1: outputs = outputs[0] return", "context. Returns ------- samples: NDArray The sampled candidate classes in 1-D `int64` dtype.", "expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled =", "---------- body : a Python function. Define computation in an iteration. 
data: an", "NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] states", "in inputs: if not isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list =", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "follow such a distribution. For example, if the classes represent words in a", "None: ctx = current_context() log_range = math.log(range_max + 1) rand = uniform(0, log_range,", "approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates the range", "from ..base import _as_list from . import ndarray try: from .gen_contrib import *", "classes in 1-D `float64` dtype. expected_count_sample: NDArray The expected count for sampled candidates", "inputs: if not isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs,", "-------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)", "mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3 3] <NDArray 4 @cpu(0)> >>>", "= ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled", "init_states): \"\"\"Run a for loop with user-defined computation over NDArrays on dimension 0.", "out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ---------- body :", "candidates in 1-D `float64` dtype. Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples,", "= mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3 3] <NDArray 4 @cpu(0)>", "an iteration. data: an NDArray or a list of NDArrays. The input data.", "current context. 
Returns ------- samples: NDArray The sampled candidate classes in 1-D `int64`", "__all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random", "an NDArray or a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters =", "The output data concatenated from the output of all iterations. states: a list", "def check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list): for i in", "to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) /", "this file # to you under the Apache License, Version 2.0 (the #", "is a list of NDArrays and has the same size as data. states", "states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out", "# # Unless required by applicable law or agreed to in writing, #", "and body has the computation for an iteration of the for loop. It", "Otherwise, data1 is a list of NDArrays and has the same size as", "can be either an NDArray or a list of NDArrays, which are concatenated", "candidates the range of integers [0, range_max). The elements of sampled_candidates are drawn", ">>> samples [1 3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879]", "list of NDArrays. The initial values of the loop states. name: string. The", "dtype. Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls,", "range_max: int The number of possible classes. ctx : Context Device context of", "import uniform from ..base import _as_list from . import ndarray try: from .gen_contrib", "\"\"\"Run a for loop with user-defined computation over NDArrays on dimension 0. 
This", "= data[i] out, states = body(s, states) outs.append(out) outs = stack(*outs) Parameters ----------", "list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or a list", "return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a", "If data is an NDArray, data1 is an NDArray. Otherwise, data1 is a", "Version 2.0 (the # \"License\"); you may not use this file except in", "for the # specific language governing permissions and limitations # under the License.", "(rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0)", "NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or a list of NDArrays\")", "zip(*outputs) tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if", "out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs) == 1:", "in the range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max", "out, states = body(data1, states) data1 can be either an NDArray or a", "list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else", "# coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import", "not ordered by decreasing frequency, do not use this op. Additionaly, it also", "outs.append(out) outs = stack(*outs) Parameters ---------- body : a Python function. Define computation", "OF ANY # KIND, either express or implied. See the License for the", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied.", "foreach. 
The computation done by this operator is equivalent to the pseudo code", "is equivalent to the pseudo code below when the input data is NDArray:", "\"\"\"Contrib NDArray API of MXNet.\"\"\" import math from ..context import current_context from ..random", "approximately follow such a distribution. For example, if the classes represent words in", "is useful when the true classes approximately follow such a distribution. For example,", "= (log(class + 2) - log(class + 1)) / log(range_max + 1) This", "* num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states):", "or Zipfian distribution. This operation randomly samples *num_sampled* candidates the range of integers", "sampled candidates in 1-D `float64` dtype. Examples -------- >>> true_cls = mx.nd.array([3]) >>>", "states = body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs =", "in_type): is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg", "This sampler is useful when the true classes approximately follow such a distribution.", "number of classes to randomly sample. range_max: int The number of possible classes.", "of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64')", "of NDArrays. The loop states in the last iteration. Examples -------- >>> step", "as input and outputs a tuple of two elements, as illustrated below: out,", "be either an NDArray or a list of NDArrays. If data is an", "License, Version 2.0 (the # \"License\"); you may not use this file except", "..base import _as_list from . 
import ndarray try: from .gen_contrib import * except", "ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None):", "num_sampled # cast sampled classes to fp64 to avoid interget division sampled_cls_fp64 =", "iteration of the for loop. It runs the computation in body on each", "for i in data.shape[0]: s = data[i] out, states = body(s, states) outs.append(out)", "the target classes. num_sampled: int The number of classes to randomly sample. range_max:", "range_max). The elements of sampled_candidates are drawn with replacement from the base distribution.", "occur. Parameters ---------- true_classes : NDArray A 1-D NDArray of the target classes.", ": NDArray A 1-D NDArray of the target classes. num_sampled: int The number", "operator is an approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2)", "samples: NDArray The sampled candidate classes in 1-D `int64` dtype. expected_count_true: NDArray The", "sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for", "this file except in compliance # with the License. You may obtain a", "computation in body on each slice from the input NDArrays. body takes two", "loop states. name: string. The name of the operator. Returns ------- outputs: an", "Returns ------- samples: NDArray The sampled candidate classes in 1-D `int64` dtype. expected_count_true:", ">>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879", "may not use this file except in compliance # with the License. You", "num_sampled: int The number of classes to randomly sample. 
range_max: int The number", "states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))]", "not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] states =", "states. name: string. The name of the operator. Returns ------- outputs: an NDArray", "outs, states = body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs", "ASF licenses this file # to you under the Apache License, Version 2.0", "number of possible classes. ctx : Context Device context of output. Default is", "out can be either an NDArray or a list of NDArrays, which are", "4 @cpu(0)> \"\"\" if ctx is None: ctx = current_context() log_range = math.log(range_max", "[0, range_max) sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true", "NDArray or a list of NDArrays. The input data. init_states: an NDArray or", "uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are in the range", "(log(class + 2) - log(class + 1)) / log(range_max + 1) This sampler", "*num_sampled* candidates the range of integers [0, range_max). The elements of sampled_candidates are", "# distributed with this work for additional information # regarding copyright ownership. The", "loop with user-defined computation over NDArrays on dimension 0. This operator simulates a", "division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log()", "as data. states is a list of NDArrays and have the same size", "check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or a list of NDArrays\") not_data_list", "an NDArray or a list of NDArrays. The initial values of the loop", "Parameters ---------- body : a Python function. 
Define computation in an iteration. data:", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "output of foreach. The computation done by this operator is equivalent to the", "# pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an", "ndarray.NDArray, \"data should be an NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray,", "3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)>", "function. Define computation in an iteration. data: an NDArray or a list of", "possible classes. ctx : Context Device context of output. Default is current context.", "with this work for additional information # regarding copyright ownership. The ASF licenses", "/ log_range * num_sampled # cast sampled classes to fp64 to avoid interget", "done by this operator is equivalent to the pseudo code below when the", "current_context from ..random import uniform from ..base import _as_list from . import ndarray", "from the base distribution. The base distribution for this operator is an approximately", "the License. You may obtain a copy of the License at # #", "= _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out in outputs:", "is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data,", "3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample", "ctx = current_context() log_range = math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,),", "= uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are in the", "samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5) >>> samples [1 3 3 3]", "list of NDArrays and have the same size as init_states. Similarly, out can", "agreements. 
See the NOTICE file # distributed with this work for additional information", "a Python function. Define computation in an iteration. data: an NDArray or a", "the last execution of body are the second output of foreach. The computation", "writing, # software distributed under the License is distributed on an # \"AS", "It runs the computation in body on each slice from the input NDArrays.", "The initial values of the loop states. name: string. The name of the", "and limitations # under the License. # coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import", "takes two arguments as input and outputs a tuple of two elements, as", "an approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2) - log(class", "drawn with replacement from the base distribution. The base distribution for this operator", ": Context Device context of output. Default is current context. Returns ------- samples:", "num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run", "NOTICE file # distributed with this work for additional information # regarding copyright", "true classes approximately follow such a distribution. For example, if the classes represent", "sample. range_max: int The number of possible classes. ctx : Context Device context", "to occur. Parameters ---------- true_classes : NDArray A 1-D NDArray of the target", "(sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true,", "body takes two arguments as input and outputs a tuple of two elements,", "is expected to occur. Parameters ---------- true_classes : NDArray A 1-D NDArray of", "# pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for loop with user-defined", "The name of the operator. 
Returns ------- outputs: an NDArray or a list", "current_context() log_range = math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx)", "enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for loop with user-defined computation over", "replacement from the base distribution. The base distribution for this operator is an", "pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw", "foreach; states from the last execution of body are the second output of", "the input NDArrays. body takes two arguments as input and outputs a tuple", "data is an NDArray, data1 is an NDArray. Otherwise, data1 is a list", "except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max,", "for sampled candidates in 1-D `float64` dtype. Examples -------- >>> true_cls = mx.nd.array([3])", "shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are in the range of [0,", "if not_data_list else data[0].shape[0] states = init_states outputs = [] for i in", "NDArray or a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0]", "ndarray try: from .gen_contrib import * except ImportError: pass __all__ = [\"rand_zipfian\"] #", "output. Default is current context. Returns ------- samples: NDArray The sampled candidate classes", "loop states in the last iteration. Examples -------- >>> step = lambda data,", "list of NDArrays and has the same size as data. states is a", "this operator is equivalent to the pseudo code below when the input data", "Device context of output. Default is current context. Returns ------- samples: NDArray The", "the Apache License, Version 2.0 (the # \"License\"); you may not use this", "the range of integers [0, range_max). The elements of sampled_candidates are drawn with", "use this op. 
Additionaly, it also returns the number of times each of", "lambda data, states: (data + states[0], [states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2,", "op. Additionaly, it also returns the number of times each of the \\", "# specific language governing permissions and limitations # under the License. # coding:", "is None: ctx = current_context() log_range = math.log(range_max + 1) rand = uniform(0,", "# make sure sampled_classes are in the range of [0, range_max) sampled_classes =", "tuple of two elements, as illustrated below: out, states = body(data1, states) data1", "to the pseudo code below when the input data is NDArray: states =", "= (rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls +", "data is NDArray: states = init_states outs = [] for i in data.shape[0]:", "is an NDArray, data1 is an NDArray. Otherwise, data1 is a list of", "NDArrays. If data is an NDArray, data1 is an NDArray. Otherwise, data1 is", "last execution of body are the second output of foreach. The computation done", "classes represent words in a lexicon sorted in decreasing order of \\ frequency.", "data[0].shape[0] states = init_states outputs = [] for i in range(num_iters): if not_data_list:", "input and outputs a tuple of two elements, as illustrated below: out, states", "the input data is NDArray: states = init_states outs = [] for i", "is an approximately log-uniform or Zipfian distribution: P(class) = (log(class + 2) -", "a list of NDArrays. The input data. init_states: an NDArray or a list", "list of NDArrays. The loop states in the last iteration. Examples -------- >>>", "user-defined computation over NDArrays on dimension 0. This operator simulates a for loop", "The ASF licenses this file # to you under the Apache License, Version", "file except in compliance # with the License. 
You may obtain a copy", "outputs a tuple of two elements, as illustrated below: out, states = body(data1,", "NDArrays and have the same size as init_states. Similarly, out can be either", "= [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled, range_max, ctx=None): \"\"\"Draw random samples", "a list of NDArrays. The initial values of the loop states. name: string.", "log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes are in the range of", "log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long", "file # to you under the Apache License, Version 2.0 (the # \"License\");", "also returns the number of times each of the \\ true classes and", "try: from .gen_contrib import * except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint:", "= math.log(range_max + 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make", "@cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if", "coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math", "the number of times each of the \\ true classes and the sampled", "states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs,", "math from ..context import current_context from ..random import uniform from ..base import _as_list", "an NDArray or a list of NDArrays. The input data. init_states: an NDArray", "true classes in 1-D `float64` dtype. expected_count_sample: NDArray The expected count for sampled", "[mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg):", "can be either an NDArray or a list of NDArrays. 
If data is", "from .gen_contrib import * except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long", "ctx : Context Device context of output. Default is current context. Returns -------", "computation done by this operator is equivalent to the pseudo code below when", "NDArrays, which are concatenated as the first output of foreach; states from the", "= [] for i in range(num_iters): if not_data_list: eles = data[i] else: eles", "classes is expected to occur. Parameters ---------- true_classes : NDArray A 1-D NDArray", "name: string. The name of the operator. Returns ------- outputs: an NDArray or", "5) >>> samples [1 3 3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [", "(the # \"License\"); you may not use this file except in compliance #", "of NDArrays. The input data. init_states: an NDArray or a list of NDArrays.", "isinstance(i, in_type): is_NDArray_or_list = False break else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list,", "in range(num_iters): if not_data_list: eles = data[i] else: eles = [d[i] for d", "\"\"\" if ctx is None: ctx = current_context() log_range = math.log(range_max + 1)", "outs = stack(*outs) Parameters ---------- body : a Python function. Define computation in", "classes to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64", "and outputs a tuple of two elements, as illustrated below: out, states =", "Zipfian distribution. This operation randomly samples *num_sampled* candidates the range of integers [0,", "true classes and the sampled classes is expected to occur. Parameters ---------- true_classes", "loop and body has the computation for an iteration of the for loop.", "in 1-D `float64` dtype. expected_count_sample: NDArray The expected count for sampled candidates in", "Returns ------- outputs: an NDArray or a list of NDArrays. The output data", "is an NDArray. 
Otherwise, data1 is a list of NDArrays and has the", "for loop. It runs the computation in body on each slice from the", "of NDArrays. The initial values of the loop states. name: string. The name", "isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0] states = init_states outputs", "simulates a for loop and body has the computation for an iteration of", "law or agreed to in writing, # software distributed under the License is", "base distribution for this operator is an approximately log-uniform or Zipfian distribution: P(class)", "`float64` dtype. Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample =", "0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\" if ctx is None: ctx", "words in a lexicon sorted in decreasing order of \\ frequency. If your", "# software distributed under the License is distributed on an # \"AS IS\"", "to you under the Apache License, Version 2.0 (the # \"License\"); you may", "example, if the classes represent words in a lexicon sorted in decreasing order", "outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = [] for out in", "+ 1) rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx) # make sure sampled_classes", "two arguments as input and outputs a tuple of two elements, as illustrated", "if not_data_list: eles = data[i] else: eles = [d[i] for d in data]", "and the sampled classes is expected to occur. Parameters ---------- true_classes : NDArray", "d in data] outs, states = body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs", "file # distributed with this work for additional information # regarding copyright ownership.", "# Licensed to the Apache Software Foundation (ASF) under one # or more", "on dimension 0. 
This operator simulates a for loop and body has the", "API of MXNet.\"\"\" import math from ..context import current_context from ..random import uniform", "of body are the second output of foreach. The computation done by this", "copyright ownership. The ASF licenses this file # to you under the Apache", "log(range_max + 1) This sampler is useful when the true classes approximately follow", "ownership. The ASF licenses this file # to you under the Apache License,", "assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an NDArray or a list", "= mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data,", "samples from an approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled*", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "states = mx.nd.contrib.foreach(step, data, states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True", "= [d[i] for d in data] outs, states = body(eles, states) outs =", "as the first output of foreach; states from the last execution of body", "classes approximately follow such a distribution. For example, if the classes represent words", "of the operator. 
Returns ------- outputs: an NDArray or a list of NDArrays.", "sampled_classes are in the range of [0, range_max) sampled_classes = (rand.exp() - 1).astype('int64')", "# Unless required by applicable law or agreed to in writing, # software", "init_states outs = [] for i in data.shape[0]: s = data[i] out, states", "of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if not_data_list else data[0].shape[0]", "pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math from ..context import", "sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "elements, as illustrated below: out, states = body(data1, states) data1 can be either", "second output of foreach. The computation done by this operator is equivalent to", "an NDArray or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an", "Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4,", "NDArray: states = init_states outs = [] for i in data.shape[0]: s =", "to in writing, # software distributed under the License is distributed on an", "output of all iterations. states: a list of NDArrays. The loop states in", "is_NDArray_or_list = True if isinstance(inputs, list): for i in inputs: if not isinstance(i,", "agreed to in writing, # software distributed under the License is distributed on", "the first output of foreach; states from the last execution of body are", "from ..context import current_context from ..random import uniform from ..base import _as_list from", "..random import uniform from ..base import _as_list from . 
import ndarray try: from", "is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be an NDArray or a list of", "the classes represent words in a lexicon sorted in decreasing order of \\", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "the loop states. name: string. The name of the operator. Returns ------- outputs:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "operator is equivalent to the pseudo code below when the input data is", "to the Apache Software Foundation (ASF) under one # or more contributor license", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() /", "when the input data is NDArray: states = init_states outs = [] for", "states) \"\"\" def check_input(inputs, in_type, msg): is_NDArray_or_list = True if isinstance(inputs, list): for", "NDArrays on dimension 0. This operator simulates a for loop and body has", "classes to randomly sample. range_max: int The number of possible classes. ctx :", "num_sampled, range_max, ctx=None): \"\"\"Draw random samples from an approximately log-uniform or Zipfian distribution.", "data. states is a list of NDArrays and have the same size as", "NDArray A 1-D NDArray of the target classes. num_sampled: int The number of", "to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 +", "last iteration. Examples -------- >>> step = lambda data, states: (data + states[0],", "expected_count_sampled # pylint: enable=line-too-long def foreach(body, data, init_states): \"\"\"Run a for loop with", "or a list of NDArrays\") not_data_list = isinstance(data, ndarray.NDArray) num_iters = data.shape[0] if", "not use this op. 
Additionaly, it also returns the number of times each", "import current_context from ..random import uniform from ..base import _as_list from . import", "use this file except in compliance # with the License. You may obtain", "else: is_NDArray_or_list = isinstance(inputs, in_type) assert is_NDArray_or_list, msg check_input(data, ndarray.NDArray, \"data should be", "either an NDArray or a list of NDArrays. If data is an NDArray,", "first output of foreach; states from the last execution of body are the", "a list of NDArrays. The loop states in the last iteration. Examples --------", "output of foreach; states from the last execution of body are the second", "the computation for an iteration of the for loop. It runs the computation", "NDArrays. The initial values of the loop states. name: string. The name of", "Software Foundation (ASF) under one # or more contributor license agreements. See the", "else data[0].shape[0] states = init_states outputs = [] for i in range(num_iters): if", "Zipfian distribution: P(class) = (log(class + 2) - log(class + 1)) / log(range_max", "the License is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "1-D `float64` dtype. expected_count_sample: NDArray The expected count for sampled candidates in 1-D", "all iterations. states: a list of NDArrays. The loop states in the last", "over NDArrays on dimension 0. This operator simulates a for loop and body", "interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 +", "fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64') expected_prob_sampled = ((sampled_cls_fp64 + 2.0)", "= body(eles, states) outs = _as_list(outs) outputs.append(outs) outputs = zip(*outputs) tmp_outputs = []", "body(data1, states) data1 can be either an NDArray or a list of NDArrays.", "sampled_candidates are drawn with replacement from the base distribution. 
The base distribution for", "body on each slice from the input NDArrays. body takes two arguments as", "the # specific language governing permissions and limitations # under the License. #", "* num_sampled # cast sampled classes to fp64 to avoid interget division sampled_cls_fp64", "The number of classes to randomly sample. range_max: int The number of possible", "outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs) == 1: outputs =", "See the NOTICE file # distributed with this work for additional information #", "or Zipfian distribution: P(class) = (log(class + 2) - log(class + 1)) /", "same size as data. states is a list of NDArrays and have the", "For example, if the classes represent words in a lexicon sorted in decreasing", "the NOTICE file # distributed with this work for additional information # regarding", "samples *num_sampled* candidates the range of integers [0, range_max). The elements of sampled_candidates", "in writing, # software distributed under the License is distributed on an #", "the Apache Software Foundation (ASF) under one # or more contributor license agreements.", "under the License. # coding: utf-8 # pylint: disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API", "disable=wildcard-import, unused-wildcard-import \"\"\"Contrib NDArray API of MXNet.\"\"\" import math from ..context import current_context", "an approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates the", "Default is current context. Returns ------- samples: NDArray The sampled candidate classes in", "Parameters ---------- true_classes : NDArray A 1-D NDArray of the target classes. num_sampled:", "lexicon sorted in decreasing order of \\ frequency. 
If your classes are not", "for loop and body has the computation for an iteration of the for", "ndarray.NDArray, \"init_states should be an NDArray or a list of NDArrays\") not_data_list =", "in a lexicon sorted in decreasing order of \\ frequency. If your classes", "from an approximately log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates", "------- samples: NDArray The sampled candidate classes in 1-D `int64` dtype. expected_count_true: NDArray", "of NDArrays and have the same size as init_states. Similarly, out can be", "mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>> outs, states = mx.nd.contrib.foreach(step, data, states)", "1 @cpu(0)> >>> exp_count_sample [ 0.22629439 0.12453879 0.12453879 0.12453879] <NDArray 4 @cpu(0)> \"\"\"", "or a list of NDArrays\") check_input(init_states, ndarray.NDArray, \"init_states should be an NDArray or", "sampled classes is expected to occur. Parameters ---------- true_classes : NDArray A 1-D", "NDArray or a list of NDArrays. If data is an NDArray, data1 is", "name of the operator. Returns ------- outputs: an NDArray or a list of", "# with the License. You may obtain a copy of the License at", "from the input NDArrays. body takes two arguments as input and outputs a", "from the last execution of body are the second output of foreach. The", "* except ImportError: pass __all__ = [\"rand_zipfian\"] # pylint: disable=line-too-long def rand_zipfian(true_classes, num_sampled,", "of the for loop. It runs the computation in body on each slice", "= [] for i in data.shape[0]: s = data[i] out, states = body(s,", "in 1-D `float64` dtype. 
Examples -------- >>> true_cls = mx.nd.array([3]) >>> samples, exp_count_true,", "expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes, expected_count_true, expected_count_sampled # pylint: enable=line-too-long def", "1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls + 2.0) / (true_cls", "[] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs)", "Apache License, Version 2.0 (the # \"License\"); you may not use this file", "an NDArray, data1 is an NDArray. Otherwise, data1 is a list of NDArrays", "the computation in body on each slice from the input NDArrays. body takes", "under one # or more contributor license agreements. See the NOTICE file #", "[states[0] * 2]) >>> data = mx.nd.random.uniform(shape=(2, 10)) >>> states = [mx.nd.random.uniform(shape=(10))] >>>", "# to you under the Apache License, Version 2.0 (the # \"License\"); you", "required by applicable law or agreed to in writing, # software distributed under", "specific language governing permissions and limitations # under the License. # coding: utf-8", "are concatenated as the first output of foreach; states from the last execution", "in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list and len(outputs) == 1: outputs", "tmp_outputs = [] for out in outputs: tmp_outputs.append(ndarray.op.stack(*out)) outputs = tmp_outputs if not_data_list", "of possible classes. ctx : Context Device context of output. Default is current", "NDArray The expected count for true classes in 1-D `float64` dtype. 
expected_count_sample: NDArray", "sampled_classes = (rand.exp() - 1).astype('int64') % range_max true_cls = true_classes.as_in_context(ctx).astype('float64') expected_count_true = ((true_cls", "+ 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled", "computation over NDArrays on dimension 0. This operator simulates a for loop and", "an iteration of the for loop. It runs the computation in body on", "states) data1 can be either an NDArray or a list of NDArrays. If", "the operator. Returns ------- outputs: an NDArray or a list of NDArrays. The", "data[i] else: eles = [d[i] for d in data] outs, states = body(eles,", "by applicable law or agreed to in writing, # software distributed under the", "distribution. For example, if the classes represent words in a lexicon sorted in", "of classes to randomly sample. range_max: int The number of possible classes. ctx", "dimension 0. This operator simulates a for loop and body has the computation", "for additional information # regarding copyright ownership. The ASF licenses this file #", "true_classes : NDArray A 1-D NDArray of the target classes. num_sampled: int The", "# cast sampled classes to fp64 to avoid interget division sampled_cls_fp64 = sampled_classes.astype('float64')", "execution of body are the second output of foreach. The computation done by", "operator simulates a for loop and body has the computation for an iteration", "the last iteration. Examples -------- >>> step = lambda data, states: (data +", "governing permissions and limitations # under the License. 
# coding: utf-8 # pylint:", "the License for the # specific language governing permissions and limitations # under", "3 3] <NDArray 4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>>", "This operator simulates a for loop and body has the computation for an", "/ (sampled_cls_fp64 + 1.0)).log() / log_range expected_count_sampled = expected_prob_sampled * num_sampled return sampled_classes,", "applicable law or agreed to in writing, # software distributed under the License", "which are concatenated as the first output of foreach; states from the last", "body are the second output of foreach. The computation done by this operator", "of foreach. The computation done by this operator is equivalent to the pseudo", "+ 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled # cast sampled", "NDArray. Otherwise, data1 is a list of NDArrays and has the same size", "4 @cpu(0)> >>> exp_count_true [ 0.12453879] <NDArray 1 @cpu(0)> >>> exp_count_sample [ 0.22629439", "log-uniform or Zipfian distribution. This operation randomly samples *num_sampled* candidates the range of", "= stack(*outs) Parameters ---------- body : a Python function. Define computation in an", "as illustrated below: out, states = body(data1, states) data1 can be either an", "(true_cls + 1.0)).log() / log_range * num_sampled # cast sampled classes to fp64" ]
[ "apply #41997 import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] =", "pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] = df.apply(lambda x: x[\"a\"],", "1.3: (intended?) Behavior change with empty apply #41997 import pandas as pd print(pd.__version__)", "change with empty apply #41997 import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\",", "with empty apply #41997 import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"])", "#41997 import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] = df.apply(lambda", "import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] = df.apply(lambda x:", "# 1.3: (intended?) Behavior change with empty apply #41997 import pandas as pd", "pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] = df.apply(lambda x: x[\"a\"], axis=1) print(df)", "as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"] = df.apply(lambda x: x[\"a\"], axis=1)", "(intended?) Behavior change with empty apply #41997 import pandas as pd print(pd.__version__) df", "empty apply #41997 import pandas as pd print(pd.__version__) df = pd.DataFrame(columns=[\"a\", \"b\"]) df[\"a\"]", "Behavior change with empty apply #41997 import pandas as pd print(pd.__version__) df =" ]
[ "1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27 =", "m.b159 == 1) m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162 == 1)", "m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113", "59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24", "m.b178 + m.b179 + m.b180 == 1) m.c61 = Constraint(expr= m.b181 + m.b182", "- 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 +", "- 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 -", "1) m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153 == 1) m.c52 =", "+ 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62", "- 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111", "14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 -", "+ 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143", "m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4 = Constraint(expr=", "- 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 -", "m.b106 + m.b107 + m.b108 == 1) m.c37 = Constraint(expr= m.b109 + m.b110", "+ 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 +", "m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167", "+ 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162*", "147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 +", "- 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 +", "47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 -", "+ m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 ==", "- 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44", "- 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66", "- 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185", "m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 =", "+ m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35 +", "m.b28 + m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32", "Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)", "73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50", "1) m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 == 1) m.c50 =", 
"m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107", "- 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64", "import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)", "38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 +", "== 1) m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 == 1) m.c46", "- 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138", "122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173 +", "72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 +", "153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 =", "18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 -", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 =", "m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127", "m.b85 + m.b86 + m.b87 == 1) m.c30 = Constraint(expr= m.b88 + m.b89", "1) m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 == 1) m.c58 =", "m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1) m.c33 = Constraint(expr=", "Constraint(expr= m.b106 + m.b107 + m.b108 == 1) m.c37 = Constraint(expr= m.b109 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)", "C B # 65 65 0 0 0 0 0 0 # #", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 =", "64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 +", "m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 == 1) m.c37 = Constraint(expr=", "Constraint(expr= m.b118 + m.b119 + m.b120 == 1) m.c41 = Constraint(expr= m.b121 +", "+ m.b161 + m.b162 == 1) m.c55 = Constraint(expr= m.b163 + m.b164 +", "+ m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 =", "+ 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 
73006*m.b50*m.b53 - 97425*m.b50*m.b71", "m.b130 + m.b131 + m.b132 == 1) m.c45 = Constraint(expr= m.b133 + m.b134", "m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95", "m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1)", "m.b145 + m.b146 + m.b147 == 1) m.c50 = Constraint(expr= m.b148 + m.b149", "Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108", "36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 =", "+ 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 =", "# Variable counts # x b i s1s s2s sc si # Total", "- 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103", "m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48", "m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78", "234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88", "- 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100", "m.b187 + m.b188 + m.b189 == 1) m.c64 = Constraint(expr= m.b190 + m.b191", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)", "35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79*", "m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31", "+ m.b173 + m.b174 == 1) m.c59 = Constraint(expr= m.b175 + m.b176 +", "== 1) m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 == 1) m.c53", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 =", "Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) 
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)", "15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177", "Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)", "20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 +", "+ m.b81 == 1) m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 =", "193 192 0 # # Reformulation has removed 1 variable and 1 equation", "Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)", "1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25 =", "18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 -", "m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186", "Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34 +", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 =", "110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175", "74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 +", "63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55*", "- 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191", "+ 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94", "22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 -", "- 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 -", "m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 == 1) m.c51 = Constraint(expr=", "+ 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187", "67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 -", "Constraint(expr= m.b79 + m.b80 + m.b81 == 1) m.c28 = Constraint(expr= m.b82 +", "Total E G L N X C B # 65 65 0 0", "1) m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 == 1) m.c60 =", "- 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* 
m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192", "+ m.b134 + m.b135 == 1) m.c46 = Constraint(expr= m.b136 + m.b137 +", "97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 -", "== 1) m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 == 1) m.c62", "47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29*", "m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1) m.c28 = Constraint(expr=", "+ 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186", "1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19 =", "264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 +", "= Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79", "1) m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 == 1) m.c37 =", "m.b125 + m.b126 == 1) m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129", "+ 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105", "m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73", "+ m.b140 + m.b141 == 1) m.c48 = Constraint(expr= m.b142 + m.b143 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26", "3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 -", "83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170", "- 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177", "1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1) m.c7 =", "+ 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74", "m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33", "- 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82", "Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57", "Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)", "18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61", "127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8*", "73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55", "m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 == 1) m.c43 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 =", "65 0 0 0 0 0 0 # # Variable counts # x", "+ m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 ==", "m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1)", "73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 +", "m.b153 == 1) m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 == 1)", "m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115", "m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53", "+ m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 =", "- 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110", "Constraint(expr= m.b136 + m.b137 + m.b138 == 1) m.c47 = Constraint(expr= m.b139 +", "m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110", "130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 -", "1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 =", "- 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788*", "+ m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 ==", "m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141", "m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131", "- 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81", "Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)", "at 04/21/18 13:52:22 # # Equation counts # Total E G L N", "sint # 193 1 192 0 0 0 0 0 # FX 0", "= Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37", "m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126", "72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 -", "m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25", "36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 -", "m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111", "= Constraint(expr= m.b85 + m.b86 + m.b87 == 1) m.c30 = Constraint(expr= m.b88", "== 1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 =", "= Constraint(expr= m.b25 + m.b26 + m.b27 == 1) m.c10 = Constraint(expr= m.b28", "m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1)", "1) m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 == 1) m.c38 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 =", "# # Reformulation has removed 1 variable and 1 equation from pyomo.environ import", "+ 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 +", "67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171", "45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60", "m.b140 + m.b141 == 1) m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 =", "m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 -", "m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175", "- 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167", "m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 -", "m.b57 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61", "- 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56", "105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152", "m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129", "+ m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 ==", "= Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25", "m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70", "m.b117 == 1) m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 == 1)", "m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185", "+ 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 -", "74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24", "- 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 -", "m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152", "m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162", "Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4 +", "12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 -", "m.b181 + m.b182 + m.b183 == 1) m.c62 = Constraint(expr= m.b184 + m.b185", "- 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43", "43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 -", "== 1) m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 == 1) m.c60", "153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 -", "m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177", "- 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 -", "m.b22 + m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26", "m.b171 == 1) m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 == 1)", "145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 =", "m.b92 + m.b93 == 1) m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96", "+ m.b104 + m.b105 == 1) m.c36 = Constraint(expr= m.b106 + m.b107 +", "m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b114", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 =", "m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91", "+ 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112*", "m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 -", "0 0 0 0 # FX 0 0 0 0 0 0 0", "m.b122 + m.b123 == 1) m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126", "35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 +", "92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 +", "12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179", "m.b183 == 1) m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b111 == 1) m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b27 == 1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 ==", "+ 145724*m.b44*m.b68 + 
67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 =", "22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133", "199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 -", "m.b70 + m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74", "- 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 -", "- 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 =", "211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 -", "56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 =", "N X C B # 65 65 0 0 0 0 0 0", "+ 59421*m.b184*m.b187 + 
59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 ,", "- 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 +", "Constraint(expr= m.b115 + m.b116 + m.b117 == 1) m.c40 = Constraint(expr= m.b118 +", "m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117", "+ 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1", "58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b41 + m.b42 == 1) m.c15 = Constraint(expr= m.b43 + m.b44 +", "= Constraint(expr= m.b160 + m.b161 + m.b162 == 1) m.c55 = Constraint(expr= m.b163", "- 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 -", "== 1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 =", "- 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181", "+ m.b120 == 1) m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 ==", "m.b144 == 1) m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 == 1)", "- 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 
44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 -", "- 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 -", "Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 =", "+ 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95", "m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68", "m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1)", "218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 +", "54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134", "m.b177 == 1) m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 == 1)", "m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101", "removed 1 variable and 1 equation from pyomo.environ import * model = m", "1) m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 == 1) m.c46 =", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 =", "+ 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 +", "NL DLL # 385 193 192 0 # # Reformulation has removed 1", "== 1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22", "+ 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 =", "== 1) m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 == 1) m.c59", "m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1) m.c29 = Constraint(expr=", "m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125", "Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 -", "m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46", "204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 -", "1) m.c55 = Constraint(expr= m.b163 + m.b164 + 
m.b165 == 1) m.c56 =", "+ m.b185 + m.b186 == 1) m.c63 = Constraint(expr= m.b187 + m.b188 +", "- 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182", "m.b169 + m.b170 + m.b171 == 1) m.c58 = Constraint(expr= m.b172 + m.b173", "- 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19", "*m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186", "110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174", "- 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 +", "Constraint(expr= m.b124 + m.b125 + m.b126 == 1) m.c43 = Constraint(expr= m.b127 +", "+ m.b168 == 1) m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj =", "+ 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 -", "211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156", "- 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166", "m.b34 + m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38", "+ m.b162 == 1) m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165 ==", "- 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160", "Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16", "m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54", "+ 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 -", "m.b80 + m.b81 == 1) m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)", "277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3", "m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 == 1) m.c57 = Constraint(expr=", "35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 +", "- 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13", "+ 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17", "m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 =", "84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 -", "- 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 -", "- 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 -", "+ m.b90 == 1) m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 ==", "1) m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 == 1) m.c47 =", "= Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40", "122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 -", "- 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118", "117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18", "0 0 0 # # Nonzero counts # Total const NL DLL #", "Constraint(expr= m.b112 + m.b113 + m.b114 == 1) m.c39 = Constraint(expr= m.b115 +", "220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 -", "85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 -", "- 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 +", "== 1) m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 == 1) m.c37", "m.c51 = Constraint(expr= 
m.b151 + m.b152 + m.b153 == 1) m.c52 = Constraint(expr=", "- 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 +", "m.b134 + m.b135 == 1) m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138", "- 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 +", "+ 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67", "m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176", "+ 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 +", "+ m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74 +", "+ 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 -", "+ 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 -", "54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137", "m.b25 + m.b26 + m.b27 == 1) m.c10 = Constraint(expr= m.b28 + m.b29", "= Constraint(expr= m.b94 + m.b95 + m.b96 == 1) m.c33 = Constraint(expr= m.b97", "m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 =", "m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b76", "Constraint(expr= m.b154 + m.b155 + m.b156 == 1) m.c53 = Constraint(expr= m.b157 +", "m.b7 + m.b8 + m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11", "+ m.b84 == 1) m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 ==", "m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155", "m.b8 + m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12", "== 1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14", "m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 == 1) m.c5 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 =", "m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109", "Constraint(expr= m.b160 + m.b161 + m.b162 == 1) m.c55 = Constraint(expr= m.b163 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79 +", "Convert at 04/21/18 13:52:22 # # Equation counts # Total E G L", "+ 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40", "1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 == 1) m.c10 =", "m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52", "+ m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 ==", "0 0 # FX 0 0 0 0 0 0 0 0 #", "- 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104", "m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26 = Constraint(expr=", "85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 -", "m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169", "- 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 =", "m.b105 == 1) m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 =", "m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149", "has removed 1 variable and 1 equation from pyomo.environ import * model =", "+ 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 -", "m.b149 + m.b150 == 1) m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153", "+ m.b179 + m.b180 == 1) m.c61 = Constraint(expr= m.b181 + m.b182 +", "m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192", "+ m.b98 + m.b99 == 1) m.c34 = Constraint(expr= m.b100 + m.b101 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 =", "m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27 = Constraint(expr=", "54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76", "m.b148 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152", "177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 -", "m.b113 + m.b114 == 1) m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117", "58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 -", "Total const NL DLL # 385 193 192 0 # # Reformulation has", "m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1)", "- 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 -", "Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67 +", "B # 65 65 0 0 0 0 0 0 # # Variable", "ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)", "91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 +", "- 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35", "204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 =", "m.b121 + m.b122 + m.b123 == 1) m.c42 = Constraint(expr= m.b124 + m.b125", "m.b142 + m.b143 + m.b144 == 1) m.c49 = Constraint(expr= m.b145 + m.b146", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 =", "m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21", "+ 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184", "m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136", "m.b103 + m.b104 + m.b105 == 1) m.c36 = Constraint(expr= m.b106 + m.b107", "m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191", "= Constraint(expr= m.b163 + m.b164 + m.b165 == 1) m.c56 = Constraint(expr= m.b166", "m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = 
Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4", "= Constraint(expr= m.b178 + m.b179 + m.b180 == 1) m.c61 = Constraint(expr= m.b181", "- 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54", "Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 -", "m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62", "m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150", "218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 +", "75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173", "m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13", "m.b110 + m.b111 == 1) m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114", "Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 =", "+ 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 +", "+ 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53", "+ m.b182 + m.b183 == 1) m.c62 = Constraint(expr= m.b184 + m.b185 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)", "67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 =", "92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 +", "1) m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 == 1) m.c45 =", "== 1) m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 == 1) m.c40", "+ m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 +", "+ m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 ==", "173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 +", "75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 -", "51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)", "45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364*", "97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 +", "m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1) m.c32 = Constraint(expr=", "Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)", "277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)", "35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 -", "m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181", "76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158", "+ 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 +", "== 1) m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1) m.c30", "Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 =", "b i s1s s2s sc si # Total cont binary integer sos1 sos2", "m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188", "87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 -", "m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 == 1) m.c50 = Constraint(expr=", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)", "32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189", "Constraint(expr= m.b85 + m.b86 + m.b87 == 1) m.c30 = Constraint(expr= m.b88 +", "m.b184 + m.b185 + m.b186 == 1) m.c63 = Constraint(expr= m.b187 + m.b188", "+ 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63", "- 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 =", "+ 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175", "- 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129", "- 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 +", "- 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144", "== 1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17", "# Equation counts # Total E G L N X C B #", "- 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131", "- 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 -", "m.b41 + m.b42 == 1) m.c15 = 
Constraint(expr= m.b43 + m.b44 + m.b45", "7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 +", "Constraint(expr= m.b187 + m.b188 + m.b189 == 1) m.c64 = Constraint(expr= m.b190 +", "3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 =", "116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192", "m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162 == 1) m.c55 = Constraint(expr=", "model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 =", "- 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10", "- 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159", "Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)", 
"22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 =", "129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 -", "+ m.b188 + m.b189 == 1) m.c64 = Constraint(expr= m.b190 + m.b191 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)", "39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 -", "+ m.b170 + m.b171 == 1) m.c58 = Constraint(expr= m.b172 + m.b173 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)", "72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 =", "m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24 = Constraint(expr=", "35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 +", "35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31", "83602*m.b2*m.b5 + 
67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27", "m.b101 + m.b102 == 1) m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105", "- 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 =", "- 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166", "97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76", "65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044*", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187", "+ m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.c35 = 
Constraint(expr= m.b103 + m.b104 + m.b105 == 1) m.c36 = Constraint(expr=", "m.b116 + m.b117 == 1) m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120", "Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)", "MINLP written by GAMS Convert at 04/21/18 13:52:22 # # Equation counts #", "m.b19 + m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23", "- 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46", "+ 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 -", "- 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 +", "192 0 # # Reformulation has removed 1 variable and 1 equation from", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 =", "Constraint(expr= m.b25 + m.b26 + m.b27 == 1) m.c10 = Constraint(expr= m.b28 +", "127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9", "m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36", "m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81", "72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73*", "- 45165*m.b31*m.b55 - 
129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37", "177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 =", "105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 +", "+ 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 =", "85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58", "0 0 0 0 # # Variable counts # x b i s1s", "275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162", "== 1) m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1) m.c33", "- 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153", "# # Nonzero counts # Total const NL DLL # 385 193 192", "m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b93 == 1) m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 ==", "+ m.b26 + m.b27 
== 1) m.c10 = Constraint(expr= m.b28 + m.b29 +", "264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109", "- 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125", "39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149", "36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 +", "m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83", "Constraint(expr= m.b121 + m.b122 + m.b123 == 1) m.c42 = Constraint(expr= m.b124 +", "51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 -", "== 1) m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 == 1) m.c57", "Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26 = Constraint(expr= m.b76 +", "36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 +", "116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191", "189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119", "= Constraint(expr= m.b172 + m.b173 + m.b174 == 1) m.c59 = Constraint(expr= m.b175", "m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 =", "m.b109 + m.b110 + m.b111 == 1) m.c38 = Constraint(expr= m.b112 + m.b113", "m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 -", "45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59", "== 1) m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1) m.c34", "+ m.b147 == 1) m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 ==", "*m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185", "156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133", "m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29", "+ m.b105 == 1) m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 =", "m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139", "Constraint(expr= m.b100 + m.b101 + m.b102 == 1) m.c35 = Constraint(expr= m.b103 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 =", "Constraint(expr= m.b82 + m.b83 + m.b84 == 1) m.c29 = 
Constraint(expr= m.b85 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)", "65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 +", "Constraint(expr= m.b184 + m.b185 + m.b186 == 1) m.c63 = Constraint(expr= m.b187 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)", "73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 +", "m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166", "m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92", "m.b137 + m.b138 == 1) m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141", "- 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148", "- 
85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192", "m.b135 == 1) m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 == 1)", "m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39", "83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172", "59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28", "169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 -", "1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17 =", "m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128", "m.b112 + m.b113 + m.b114 == 1) m.c39 = Constraint(expr= m.b115 + m.b116", "130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 -", "m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17 = Constraint(expr=", "- 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 =", "+ m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38 +", "- 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145", "- 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176", "m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 -", "m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 == 1) m.c53 = Constraint(expr=", "m.b100 + m.b101 + m.b102 == 1) m.c35 = Constraint(expr= m.b103 + m.b104", "m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 =", "1 equation from pyomo.environ import * model = m = ConcreteModel() m.b1 =", "8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 -", "m.b55 + m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59", "== 1) m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 == 1) m.c50", "m.b141 == 1) m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 =", "m.b163 + m.b164 + m.b165 == 1) m.c56 = Constraint(expr= m.b166 + m.b167", "- 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95", "cont binary integer sos1 sos2 scont sint # 193 1 192 0 0", "== 1) m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1) m.c35", "+ 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183", "22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 +", "- 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100", "m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82", "- 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 -", "m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 == 1) m.c40 = Constraint(expr=", "1) m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 == 1) m.c64 =", "+ m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 ==", "59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 =", "- 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96", "= Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64", "m.b115 + m.b116 + m.b117 == 1) m.c40 = Constraint(expr= m.b118 + m.b119", "1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6 =", "+ m.b59 
+ m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62 +", "- 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b16 + m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19 + m.b20", "m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3 = Constraint(expr=", "- 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 -", "75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191", "Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)", "*m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146", "66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 -", "Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64 +", "Constraint(expr= m.b10 + m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13 +", "- 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115", "m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116", "1) m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 == 1) 
m.c48 =", "1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 -", "m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80", "+ m.b125 + m.b126 == 1) m.c43 = Constraint(expr= m.b127 + m.b128 +", "m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90", "Constraint(expr= m.b97 + m.b98 + m.b99 == 1) m.c34 = Constraint(expr= m.b100 +", "97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57", "m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47", "114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 -", "== 1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21", "m.b157 + m.b158 + m.b159 == 1) m.c54 = Constraint(expr= m.b160 + m.b161", "Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)", "1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9 =", "m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33", "Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "+ 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440*", "1) m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8 =", "+ 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 +", "2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 -", "*m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 -", "m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1)", "m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108", "Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0)", "* model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3", "- 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86", "- 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73 +", "m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 +", "m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55", "- 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134*", "m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1)", "- 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 +", "77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 -", "m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142", "- 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 -", "28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 -", "m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 =", "m.c9 = Constraint(expr= m.b25 + 
m.b26 + m.b27 == 1) m.c10 = Constraint(expr=", "Constraint(expr= m.b172 + m.b173 + m.b174 == 1) m.c59 = Constraint(expr= m.b175 +", "+ 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116", "- 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110", "1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 =", "1) m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 == 1) m.c49 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169", "47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 -", "+ 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114", "+ 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151", "- 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171", "158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183", "39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144", "m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b159", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 =", "m.b84 == 1) m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)", "173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15", "m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27", "Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b152 + m.b153 == 1) m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156", "m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165 == 1) m.c56 = Constraint(expr=", "- 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188", "+ m.b123 == 1) m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 ==", "m.b4 + m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8", "== 1) m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 == 1) m.c54", "= Constraint(expr= m.b184 + m.b185 + m.b186 == 1) m.c63 = Constraint(expr= m.b187", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b138 == 1) m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 ==", "130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 -", "54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 -", "+ m.b146 + m.b147 == 1) m.c50 = Constraint(expr= m.b148 + m.b149 +", "+ m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 ==", "m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19 = Constraint(expr=", "m.b96 == 1) m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1)", "66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1) m.c32", "68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 +", "m.b119 + m.b120 == 1) m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123", "+ m.b86 + m.b87 == 1) m.c30 = Constraint(expr= m.b88 + m.b89 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 =", "# Total cont binary integer sos1 sos2 scont sint # 193 1 192", "m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 == 1) m.c64 = Constraint(expr=", "m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93", "+ 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180", "- 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43", "- 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112", "+ 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 +", "1) m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 == 1) m.c53 =", "+ 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124", "m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132", "Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0)", "87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 =", "== 1) m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1) m.c31", "= Constraint(expr= m.b175 + m.b176 + m.b177 == 1) m.c60 = Constraint(expr= m.b178", "65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176", "0 # FX 0 0 0 
0 0 0 0 0 # #", "- 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 +", "+ 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 +", "29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)", "105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 -", "+ 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 +", "m.b186 == 1) m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 == 1)", "162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167", "counts # Total E G L N X C B # 65 65", "m.b108 == 1) m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 == 1)", "+ 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38", "- 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 +", "1) m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1) m.c36 =", "m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122", "m.b94 + m.b95 + m.b96 == 1) m.c33 = Constraint(expr= m.b97 + m.b98", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b177 =", "- 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187", "- 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65", "X C B # 65 65 0 0 0 0 0 0 #", "Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34", "24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32", "= Constraint(expr= m.b115 + m.b116 + m.b117 == 1) m.c40 = Constraint(expr= m.b118", "- 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154", "- 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32", "m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 =", "m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160", "234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 
114707*m.b66*m.b69 +", "m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6", "+ m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 ==", "m.b132 == 1) m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 == 1)", "Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7 +", "m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51", "36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56", "1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15 =", "m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100", "Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 =", "1 variable and 1 equation from pyomo.environ import * model = m =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 =", "- 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 -", "77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 
129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 =", "35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b108 == 1) m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 =", "+ m.b42 == 1) m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11", "Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 == 1) m.c54 = Constraint(expr=", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 =", "- 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* 
m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174", "m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155", "+ m.b141 == 1) m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180", "m.b1 + m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5", "- 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 -", "m.b136 + m.b137 + m.b138 == 1) m.c47 = Constraint(expr= m.b139 + m.b140", "+ 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 =", "189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 =", "m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 == 1) m.c46 = Constraint(expr=", "+ m.b158 + m.b159 == 1) m.c54 = Constraint(expr= m.b160 + m.b161 +", "- 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131", "Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)", "95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 -", "- 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33", "+ m.b95 + m.b96 == 1) m.c33 = Constraint(expr= m.b97 + m.b98 +", "19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132", "Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52 +", "m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11", "68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 -", "+ 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 =", "m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66", "- 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190", "Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 -", "+ 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 -", "+ m.b75 == 1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 ==", "sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2 =", "m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69", "153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 +", "- 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36", "+ m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5 +", "m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 == 1) m.c39 = Constraint(expr=", "72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 +", "Constraint(expr= m.b157 + m.b158 + m.b159 == 1) m.c54 = Constraint(expr= m.b160 +", "m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 == 1) m.c41 = Constraint(expr=", "1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13 =", "and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.b1", "38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99*", "1) m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1) m.c35 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 =", "m.b115 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119", "m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143", "+ 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 -", "+ 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47", "*m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92", "- 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)", "1) m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 == 1) m.c62 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143*", "+ m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 =", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157", "m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 == 1) m.c62 = Constraint(expr=", "- 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148", "m.b76 + m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80", "18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 -", "0 0 0 0 0 # # Variable counts # x b i", "Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)", "77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73", "E G L N X C B # 65 65 0 0 0", "m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75", "154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148*", "- 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54", "Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b15 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19", "- 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111", "- 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 -", "- 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 -", "m.b127 + m.b128 + m.b129 == 1) m.c44 = Constraint(expr= m.b130 + m.b131", "+ 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70", "+ 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70", "Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 -", "= Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 =", "Constraint(expr= m.b133 + m.b134 + m.b135 == 1) m.c46 = Constraint(expr= m.b136 +", "+ 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)", "22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 -", "m.b143 + m.b144 == 1) m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147", "m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173", "= Constraint(expr= m.b79 + m.b80 + m.b81 == 1) m.c28 = Constraint(expr= m.b82", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 =", "64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 -", "- 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 -", "- 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79", "- 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89", "- 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138", "145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 +", "73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 =", "+ 1787*m.b122*m.b125 - 39963*m.b122*m.b143 
- 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147", "Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)", "1 192 0 0 0 0 0 # FX 0 0 0 0", "Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 -", "m.b175 + m.b176 + m.b177 == 1) m.c60 = Constraint(expr= m.b178 + m.b179", "== 1) m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 == 1) m.c63", "- 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 -", "+ 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 +", "m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65", "+ 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 -", "m.b126 == 1) m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 == 1)", "- 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184", "Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 =", "- 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150", "= Constraint(expr= m.b91 + m.b92 + m.b93 == 1) m.c32 = Constraint(expr= m.b94", "129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)", "62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 -", "Constraint(expr= m.b145 + m.b146 + m.b147 == 1) m.c50 = Constraint(expr= m.b148 +", "- 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183", "m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 == 1) m.c38 = Constraint(expr=", "m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12", "Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) 
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 =", "- 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127", "14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130", "164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 +", "+ 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 -", "35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30", "m.b133 + m.b134 + m.b135 == 1) m.c46 = Constraint(expr= m.b136 + m.b137", "159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 -", "+ m.b131 + m.b132 == 1) m.c45 = Constraint(expr= m.b133 + m.b134 +", "+ m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48 ==", "- 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104", "Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 =", "+ 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr=", "218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 +", "0 # # Variable counts # x b i s1s s2s sc si", "+ 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107", "- 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165", "+ 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141", "- 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168", "158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 =", "67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46*", "m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120", "- 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 +", "== 1) m.c54 = 
Constraint(expr= m.b160 + m.b161 + m.b162 == 1) m.c55", "== 1) m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16", "130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15", "= Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7", "+ m.b164 + m.b165 == 1) m.c56 = Constraint(expr= m.b166 + m.b167 +", "== 1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18", "+ m.b132 == 1) m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135 ==", "189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590*", "== 1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 == 1) m.c10", "- 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 -", "m.b86 + m.b87 == 1) m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90", "Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172", "2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48", "84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61", "= Constraint(expr= m.b148 + m.b149 + m.b150 == 1) m.c51 = Constraint(expr= m.b151", "= Constraint(expr= m.b166 + m.b167 + m.b168 == 1) m.c57 = Constraint(expr= m.b169", "- 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 -", 
"= Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 =", "- 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69", "- 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 -", "35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 +", "+ 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 +", "64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119", "m.b88 + m.b89 + m.b90 == 1) m.c31 = Constraint(expr= m.b91 + m.b92", "m.b162 == 1) m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165 == 1)", "- 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80", "Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)", "15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 +", "m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44", "- 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55", "m.b131 + m.b132 == 1) m.c45 = Constraint(expr= m.b133 + m.b134 + m.b135", "+ 33988*m.b180*m.b183 + 
116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 +", "== 1) m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 == 1) m.c49", "+ 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70", "+ 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120", "+ 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b156 == 1) m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 == 1)", "117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 +", "m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 =", "75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 +", "m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42", "# x b i s1s s2s sc si # Total cont binary integer", "59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize)", "15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48", "m.b161 + m.b162 
== 1) m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165", "1) m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1) m.c33 =", "m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51", "= Constraint(expr= m.b151 + m.b152 + m.b153 == 1) m.c52 = Constraint(expr= m.b154", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 =", "95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 -", "m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130", "162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 +", "m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11 = Constraint(expr=", "m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57", "Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97", "- 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 
14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106*", "m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 == 1) m.c6 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 == 1) m.c63 = Constraint(expr=", "64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 -", "m.b170 + m.b171 == 1) m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174", "+ m.b153 == 1) m.c52 = Constraint(expr= m.b154 + m.b155 + m.b156 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)", "Equation counts # Total E G L N X C B # 65", "m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24", "+ m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 ==", "m.b64 + m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68", "117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 +", "+ 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156", "- 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178", "m.b166 + m.b167 + m.b168 == 1) m.c57 = Constraint(expr= m.b169 + m.b170", "156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764*", "29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136", "== 1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25", "- 147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20", "Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)", "130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 +", "+ m.b126 == 1) m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 ==", "+ 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94", "+ m.b80 + m.b81 == 1) m.c28 = Constraint(expr= m.b82 + m.b83 +", "Variable counts # x b i s1s s2s sc si # Total cont", "32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b83 + m.b84 == 1) m.c29 = Constraint(expr= m.b85 + m.b86 +", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)", "193 1 192 0 0 0 0 0 # FX 0 0 0", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 =", "173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 +", ", sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2", "44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39", "Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b138 == 1) m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 == 1)", "s2s sc si # Total cont binary integer sos1 sos2 scont sint #", "= Constraint(expr= m.b10 + m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13", "189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321*", "m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47 + m.b48", "== 1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 =", "m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) 
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174", "275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161", "+ 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46", "+ 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b112 + m.b113 + m.b114 == 1) m.c39 = Constraint(expr= m.b115", "- 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172", "6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102", "- 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34", "# # Variable counts # x b i s1s s2s sc si #", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 =", "+ m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17 +", "2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52", "m.b134 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138", "m.b118 + m.b119 + m.b120 == 1) m.c41 = Constraint(expr= m.b121 + m.b122", "- 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 -", "m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96", "77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 -", "m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17", "220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45", "== 1) m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 == 1) m.c58", "+ m.b174 == 1) m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 =", "1) m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1) m.c29 =", "m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34", "+ m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26 +", "+ m.b8 + m.b9 == 1) m.c4 = Constraint(expr= m.b10 + m.b11 +", "1) m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 == 1) m.c44 =", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 =", "153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 +", "= Constraint(expr= m.b130 + m.b131 + m.b132 == 1) m.c45 = Constraint(expr= m.b133", "0 0 0 0 0 0 # # Variable counts # x b", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 =", "m.b151 + m.b152 + m.b153 == 1) m.c52 = Constraint(expr= m.b154 + m.b155", "m.b46 + m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50", "+ m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 ==", "= Constraint(expr= m.b82 + m.b83 + m.b84 == 1) m.c29 = Constraint(expr= m.b85", "1) m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 == 1) m.c57 =", "Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46 +", "= Constraint(expr= m.b154 + m.b155 + m.b156 == 1) m.c53 = Constraint(expr= m.b157", "+ 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72", "= Constraint(expr= m.b16 + m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19", "Constraint(expr= m.b181 + m.b182 + m.b183 == 1) m.c62 = Constraint(expr= m.b184 +", "# # Equation counts # Total E G L N X C B", "47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31", "147716*m.b15*m.b39 + 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 +", "m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 == 1) m.c44 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 +", "+ m.b165 == 1) m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 ==", "204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 -", "== 1) m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1) m.c36", "Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9", "m.b154 + m.b155 + m.b156 == 1) m.c53 = Constraint(expr= m.b157 + m.b158", "m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147", "m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112", "- 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125*", "1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26 =", "+ 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 -", "1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22 =", "159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)", "0 0 0 # # Variable counts # x b i s1s s2s", "84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 -", "45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 -", "m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26 = Constraint(expr= m.b76", "Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11", "+ 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120", "19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 -", "m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 == 1) m.c45 = Constraint(expr=", "56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 +", "m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 =", "- 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169", "m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35", "m.b93 == 1) m.c32 = Constraint(expr= m.b94 + m.b95 + m.b96 == 1)", "Total cont binary integer sos1 sos2 scont sint # 193 1 192 0", "Constraint(expr= m.b103 + m.b104 + m.b105 == 1) m.c36 = Constraint(expr= m.b106 +", "85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 -", "m.b185 + m.b186 == 1) m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189", "17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143", "= Constraint(expr= m.b109 + m.b110 + m.b111 == 1) m.c38 = Constraint(expr= m.b112", "0 0 0 0 0 # # Nonzero counts # Total const NL", "m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75", "77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 -", "73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 +", "+ m.b18 == 1) m.c7 = 
Constraint(expr= m.b19 + m.b20 + m.b21 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 =", "- 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 -", "+ 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94", "m.b18 == 1) m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1)", "- 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 =", "- 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 -", "m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13 = Constraint(expr=", "m.b164 + m.b165 == 1) m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168", "+ 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 -", "m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9", "m.b111 == 1) m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 == 1)", "= Constraint(expr= m.b127 + m.b128 + m.b129 == 1) m.c44 = Constraint(expr= m.b130", "+ 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137", "= Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49", 
"Constraint(expr= m.b46 + m.b47 + m.b48 == 1) m.c17 = Constraint(expr= m.b49 +", "0 0 0 0 0 # FX 0 0 0 0 0 0", "m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104", "+ m.b186 == 1) m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 ==", "m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182", "0 0 0 0 # # Nonzero counts # Total const NL DLL", "m.b35 + m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39", "385 193 192 0 # # Reformulation has removed 1 variable and 1", "+ m.b155 + m.b156 == 1) m.c53 = Constraint(expr= m.b157 + m.b158 +", "+ 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 =", "0 0 0 0 0 0 0 0 # # Nonzero counts #", "47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30", "- 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 -", "154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 -", "m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106", "m.b74 + m.b75 == 1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78", "m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124", "Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)", "const NL DLL # 385 193 192 0 # # Reformulation has removed", "77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53 -", "90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 +", "1) m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 == 1) m.c63 =", "- 77871*m.b29* m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158*", "1) m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 == 1) m.c51 =", "87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124", "+ 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 -", "m.b104 + m.b105 == 1) m.c36 = Constraint(expr= m.b106 + m.b107 + m.b108", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 =", "# 385 193 192 0 # # Reformulation has removed 1 variable and", "m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145", "+ m.b107 + m.b108 == 1) m.c37 = Constraint(expr= m.b109 + m.b110 +", "m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21", "DLL # 385 193 192 0 # # Reformulation has removed 1 variable", "counts # x b i s1s s2s sc si # Total cont binary", "m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 == 1) m.c58 = Constraint(expr=", "- 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 -", "+ 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86", "m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8", "m.b180 == 1) m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 == 1)", "m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71", "m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89", "8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168", "74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 =", "44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 
- 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41", "156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134", "234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 +", "- 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180", "- 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84", "m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1) m.c22 = Constraint(expr=", "== 1) m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111 == 1) m.c38", "m.b174 == 1) m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 == 1)", "61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88", "- 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170*", "m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101", "+ 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 -", "+ m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23 +", "+ 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71", "+ 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 +", "m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 +", "Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4 = Constraint(expr= 
m.b10 +", "# MINLP written by GAMS Convert at 04/21/18 13:52:22 # # Equation counts", "= Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24 = Constraint(expr= m.b70", "Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42", "m.b129 == 1) m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 == 1)", "35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 +", "- 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 =", "m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60", "m.b26 + m.b27 == 1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30", "15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 =", "0 0 0 0 0 0 0 # # Nonzero counts # Total", "+ 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118", "56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 
43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 +", "199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36", "59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 =", "m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77", "si # Total cont binary integer sos1 sos2 scont sint # 193 1", "== 1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1) m.c26", "21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18*", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 =", "m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98", "m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14 = Constraint(expr=", "= Constraint(expr= m.b139 + m.b140 + m.b141 == 1) m.c48 = Constraint(expr= m.b142", "- 63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 
63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59", "61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29", "Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190 - 277077*m.b188*m.b191 -", "+ 130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119", "m.b172 + m.b173 + m.b174 == 1) m.c59 = Constraint(expr= m.b175 + m.b176", "+ m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65 +", "m.b37 + m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40 + m.b41", "+ m.b183 == 1) m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186 ==", "61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86", "m.b176 + m.b177 == 1) m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180", "m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164", "== 1) m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 == 1) m.c61", "m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1) m.c34 = Constraint(expr=", "Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)", "= 
Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 =", "sos2 scont sint # 193 1 192 0 0 0 0 0 #", "m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182", "m.b83 + m.b84 == 1) m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87", "+ 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71", "90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 +", "- 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149*", "147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 - 147716*m.b15*m.b39", "220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 +", "m.b72 == 1) m.c25 = Constraint(expr= m.b73 + m.b74 + m.b75 == 1)", "== 1) m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 == 1) m.c39", "= Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22", "m.b58 + m.b59 + m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62", "m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 =", "m.b31 + m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35", "+ m.b113 + m.b114 == 1) m.c39 = Constraint(expr= 
m.b115 + m.b116 +", "114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81", "# FX 0 0 0 0 0 0 0 0 # # Nonzero", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 +", "m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23", "105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 -", "G L N X C B # 65 65 0 0 0 0", "51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153", "70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105", "64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162", "m.b99 == 1) m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1)", "m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168", "- 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12", "m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179", "+ 47044*m.b152*m.b176 - 
12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158", "== 1) m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 == 1) m.c43", "L N X C B # 65 65 0 0 0 0 0", "m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158", "45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173", "m.b42 == 1) m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1)", "== 1) m.c55 = Constraint(expr= m.b163 + m.b164 + m.b165 == 1) m.c56", "- 277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 +", "+ 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79", "17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 -", "m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43", "- 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 == 1) m.c35 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1) m.c13", "+ m.b144 == 1) m.c49 = Constraint(expr= m.b145 + m.b146 + m.b147 ==", "+ 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 +", "- 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b149 + m.b150 == 1) m.c51 = Constraint(expr= m.b151 + m.b152 +", "m.b128 + m.b129 == 1) m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132", "22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134", "m.b123 == 1) m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 == 1)", "+ m.b5 + m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8 +", "m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9 = Constraint(expr=", "+ m.b156 == 1) m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 ==", "m.b165 == 1) m.c56 = Constraint(expr= m.b166 + m.b167 + m.b168 == 1)", "97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 
218718*m.b64*m.b88 + 114707*m.b65*", "218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190", "14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 -", "m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112", "66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165", "m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178", "= Constraint(expr= m.b169 + m.b170 + m.b171 == 1) m.c58 = Constraint(expr= m.b172", "- 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93", "0 # # Nonzero counts # Total const NL DLL # 385 193", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 =", "90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155", "Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)", "76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 -", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85", "19908*m.b157*m.b160 + 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331*", "from pyomo.environ import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2", "*m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 +", "+ 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92", "1) m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 == 1) m.c61 =", "- 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139", "= Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46", "Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136", "Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b121 + m.b122 + m.b123 == 1) m.c42 = 
Constraint(expr= m.b124", "m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177 == 1) m.c60 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 -", "*m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87", "114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91", "m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 == 1) m.c42 = Constraint(expr=", "+ m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 ==", "- 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287*", "122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 -", "72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 +", "91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 =", "m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 =", "3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 +", "FX 0 0 0 0 0 0 0 0 # # Nonzero counts", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 =", "116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 - 277077*m.b187*m.b190", "+ 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164", "44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40", "49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240*", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 =", "= Constraint(expr= m.b100 + m.b101 + m.b102 == 1) m.c35 = Constraint(expr= m.b103", "19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 -", "variable and 1 equation from pyomo.environ import * model = m = ConcreteModel()", "- 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 -", "3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 
33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 +", "m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183", "1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81", "35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 =", "- 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83", "m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5", "67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146", "- 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 +", "m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22", "m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94", "m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154", "162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21*", "m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49", "- 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 +", "62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 =", "m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b36", "1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0)", "70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 -", "35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 +", "by GAMS Convert at 04/21/18 13:52:22 # # Equation counts # Total E", "Constraint(expr= m.b91 + m.b92 + m.b93 == 1) m.c32 = Constraint(expr= m.b94 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)", "61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7", "m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135", "Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b179 + m.b180 == 1) m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183", "m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161", "92582*m.b10*m.b178 + 
173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12*", "+ m.b36 == 1) m.c13 = Constraint(expr= m.b37 + m.b38 + m.b39 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 =", "+ 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 =", "04/21/18 13:52:22 # # Equation counts # Total E G L N X", "= ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 173612*m.b12*m.b15 + 
199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181", "85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908", "m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134", "Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b59 + m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63", "- 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 +", "m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1) m.c31 = Constraint(expr=", "+ m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b8 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 =", "90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 - 156795*m.b129*m.b132 - 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154", "- 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68", "+ m.b152 + m.b153 == 1) m.c52 = Constraint(expr= m.b154 + m.b155 +", "m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18", "0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ", "m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 =", "+ m.b135 == 1) m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 ==", "- 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 -", "+ m.b68 + m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71 +", "= Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31", "Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24 = 
Constraint(expr= m.b70 +", "# 193 1 192 0 0 0 0 0 # FX 0 0", "Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22", "63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 +", "- 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 -", "1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 =", "== 1) m.c43 = Constraint(expr= m.b127 + m.b128 + m.b129 == 1) m.c44", "+ m.b122 + m.b123 == 1) m.c42 = Constraint(expr= m.b124 + m.b125 +", "- 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64", "+ 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142", "m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 -", "+ m.b137 + m.b138 == 1) m.c47 = Constraint(expr= m.b139 + m.b140 +", "= Constraint(expr= m.b157 + m.b158 + m.b159 == 1) m.c54 = Constraint(expr= m.b160", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b152 =", "= Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21 = Constraint(expr= m.b61", "m.b102 == 1) m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105 == 1)", "m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64", "- 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45", "Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b124 + m.b125 + m.b126 == 1) m.c43 = Constraint(expr= m.b127", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 =", "+ 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 -", "- 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83", "= Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b54 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58", "= Constraint(expr= m.b7 + m.b8 + m.b9 == 1) m.c4 = Constraint(expr= m.b10", "integer sos1 sos2 scont sint # 193 1 192 0 0 0 0", "GAMS Convert at 04/21/18 13:52:22 # # Equation counts # Total E G", "+ 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192", "+ 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23", "+ 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107", "- 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 - 12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 +", "6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 -", "1) m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 == 1) m.c40 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 == 1) m.c51", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 =", "- 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98", "+ m.b89 + m.b90 == 1) m.c31 = Constraint(expr= m.b91 + m.b92 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 =", "- 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 -", "m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 == 1) m.c47 = Constraint(expr=", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 =", "== 1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1) m.c7", "12091*m.b151*m.b154 + 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 -", "+ 70821*m.b79* m.b82 - 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023*", "m.b87 == 1) m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102", "Constraint(expr= m.b166 + m.b167 + m.b168 == 1) m.c57 = Constraint(expr= m.b169 +", "- 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166", "x b i s1s s2s sc si # Total cont binary integer sos1", "m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1)", "= Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67", "m.b114 == 1) m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 == 1)", "+ 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 
32557*m.b160*m.b184 - 22331*m.b161*m.b164", "- 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57", "+ 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 + 87321*m.b115*m.b118 + 43200*m.b115*m.b139 + 87321*m.b116*m.b119 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b10 + m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14", "+ 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22", "m.b188 + m.b189 == 1) m.c64 = Constraint(expr= m.b190 + m.b191 + m.b192", "= Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2 = Constraint(expr= m.b4", "Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1) m.c30 = Constraint(expr=", "== 1) m.c46 = Constraint(expr= m.b136 + m.b137 + m.b138 == 1) m.c47", "m.b75 == 1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1)", "- 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 -", "m.b95 + m.b96 == 1) m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99", "m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15", "Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 =", "+ 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 +", "Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22 +", "m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 == 1) m.c48 = Constraint(expr=", "m.b13 + m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17", "m.b90 == 1) m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1)", "m.b189 == 1) m.c64 = Constraint(expr= m.b190 + m.b191 + m.b192 == 1)", "15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178", "- 75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 
75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176", "m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75", "1) m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12 =", "- 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 +", "164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 +", "m.c48 = Constraint(expr= m.b142 + m.b143 + m.b144 == 1) m.c49 = Constraint(expr=", "45165*m.b31*m.b55 - 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 =", "18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62", "1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20 =", "189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117 + 56108*m.b114*m.b138 +", "22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 -", "m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b110 =", "m.b27 == 1) m.c10 = Constraint(expr= m.b28 + m.b29 + m.b30 == 1)", "- 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90", "177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 -", "+ 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125", "m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 == 1) m.c61 = Constraint(expr=", "Constraint(expr= m.b175 + m.b176 + m.b177 == 1) m.c60 = Constraint(expr= m.b178 +", "= Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12 = Constraint(expr= m.b34", "Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b49 + m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 =", "+ m.b92 + m.b93 == 1) m.c32 = Constraint(expr= m.b94 + m.b95 +", "m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144", "== 1) m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 == 1) m.c42", "m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103", "- 129158* m.b32*m.b35 - 45165*m.b32*m.b56 - 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 
44654*m.b34*m.b37 + 18064*m.b34*m.b58", "m.b160 + m.b161 + m.b162 == 1) m.c55 = Constraint(expr= m.b163 + m.b164", "m.b146 + m.b147 == 1) m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150", "equation from pyomo.environ import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)", "105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 +", "61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 +", "1) m.c38 = Constraint(expr= m.b112 + m.b113 + m.b114 == 1) m.c39 =", "49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129", "+ m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72 ==", "m.b98 + m.b99 == 1) m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102", "m.b24 == 1) m.c9 = Constraint(expr= m.b25 + m.b26 + m.b27 == 1)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)", "Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model", "- 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6 + 67634*m.b3*m.b24 +", "m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 +", "counts # Total const NL DLL # 385 193 192 0 # #", "16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "+ 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37", "70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 - 264072*m.b83*m.b107 -", "+ m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14 +", "Constraint(expr= m.b37 + m.b38 + m.b39 == 1) m.c14 = Constraint(expr= m.b40 +", "- 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77 - 85230*m.b54*m.b57 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 =", "22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 -", "m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95", "Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= m.b151 + m.b152 + m.b153 == 1) m.c52 = Constraint(expr= m.b154 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)", "16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734*", "- 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 
38420*m.b99* m.b123", "3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 +", "+ 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174", "m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72", "m.b173 + m.b174 == 1) m.c59 = Constraint(expr= m.b175 + m.b176 + m.b177", "# Nonzero counts # Total const NL DLL # 385 193 192 0", "m.b158 + m.b159 == 1) m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162", "49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 -", "33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 =", "114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78", "+ 66609*m.b157*m.b181 - 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163", "Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)", "1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 =", "m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190", "m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123", "Constraint(expr= m.b142 + m.b143 + m.b144 == 1) m.c49 = Constraint(expr= m.b145 +", "- 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 - 49240*m.b121*m.b145 + 1787*m.b122*m.b125 - 39963*m.b122*m.b143", "m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148", "Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9 = Constraint(expr= m.b25 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b159 == 1) m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162 ==", "m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = Var(within=Binary,bounds=(0,1),initialize=0) m.b105", "+ 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179 -", "m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163", "written by GAMS Convert at 04/21/18 13:52:22 # # Equation counts # Total", "- 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 - 147716*m.b14*m.b38 + 130308*m.b14*m.b182 + 117135*m.b15*m.b18 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b168 == 1) m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171 == 1)", "= Constraint(expr= m.b97 + m.b98 + m.b99 == 1) m.c34 = Constraint(expr= m.b100", "153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 -", "130590* m.b96*m.b120 - 8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 =", "- 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 
-", "m.b120 == 1) m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 == 1)", "m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140", "85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 - 75908*m.b168*m.b192 -", "- 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 +", "169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 +", "154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142 - 8148*m.b139*m.b163", "+ m.b65 + m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68 +", "+ m.b180 == 1) m.c61 = Constraint(expr= m.b181 + m.b182 + m.b183 ==", "= Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18 = Constraint(expr= m.b52", "Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b74 + m.b75 == 1) m.c26 = Constraint(expr= m.b76 + m.b77 +", "- 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149", "37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 +", "= Constraint(expr= m.b88 + m.b89 + m.b90 == 1) m.c31 = Constraint(expr= m.b91", "m.c1 = Constraint(expr= m.b1 + m.b2 + m.b3 == 1) m.c2 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = 
Var(within=Binary,bounds=(0,1),initialize=0)", "159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 -", "+ m.b114 == 1) m.c39 = Constraint(expr= m.b115 + m.b116 + m.b117 ==", "= Constraint(expr= m.b145 + m.b146 + m.b147 == 1) m.c50 = Constraint(expr= m.b148", "- 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155*", "+ 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106 - 28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128", "+ 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871*", "73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69", "6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 -", "127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 + 35260*m.b6*m.b30 - 110030*m.b6*m.b174 - 68458*m.b7*m.b10", "20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117", "110030*m.b6*m.b174 - 68458*m.b7*m.b10 - 22985*m.b7*m.b31 - 35743*m.b7*m.b175 - 68458*m.b8* m.b11 - 22985*m.b8*m.b32 -", "Constraint(expr= m.b109 + m.b110 + m.b111 == 1) m.c38 = Constraint(expr= m.b112 +", "= m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) 
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)", "Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59", "1787*m.b122*m.b125 - 39963*m.b122*m.b143 - 49240*m.b122*m.b146 + 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 -", "m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40", "== 1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23", "m.b81 == 1) m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1)", "+ 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 +", "- 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46", "m.b155 + m.b156 == 1) m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 =", "20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141", "m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122", "m.b32*m.b35 - 45165*m.b32*m.b56 - 
129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38", "218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191 -", "1) m.c54 = Constraint(expr= m.b160 + m.b161 + m.b162 == 1) m.c55 =", "== 1) m.c63 = Constraint(expr= m.b187 + m.b188 + m.b189 == 1) m.c64", "22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186", "61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113", "m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189", "97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67", "m.b20 + m.b21 == 1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24", "Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84", "m.b82 + m.b83 + m.b84 == 1) m.c29 = Constraint(expr= m.b85 + m.b86", "m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 =", "- 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14", "== 1) m.c28 = Constraint(expr= m.b82 + m.b83 + m.b84 == 1) m.c29", "m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153", "m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7", "= Constraint(expr= m.b187 + m.b188 + m.b189 == 1) m.c64 = Constraint(expr= m.b190", "m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32", "Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 =", "== 1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1) m.c24", "m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b41", "Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156", "+ 7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 =", "17070*m.b136*m.b139 - 154864*m.b136*m.b160 + 17070*m.b137*m.b140 - 154864*m.b137*m.b161 + 17070*m.b138*m.b141 - 154864*m.b138*m.b162 - 162791*m.b139*m.b142", "13:52:22 # # Equation counts # Total E G L N X C", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 =", "= Constraint(expr= m.b106 + m.b107 + m.b108 == 1) m.c37 = Constraint(expr= m.b109", "16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113", "211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 - 65416*m.b150*m.b174 -", "+ m.b119 + m.b120 == 1) m.c41 = Constraint(expr= m.b121 + m.b122 +", "m.b73 + m.b74 + m.b75 == 1) m.c26 = 
Constraint(expr= m.b76 + m.b77", "1) m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 == 1) m.c59 =", "m.b43 + m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47", "m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79", "28668*m.b103*m.b127 - 14134 *m.b104*m.b107 - 28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 =", "8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 -", "sos1 sos2 scont sint # 193 1 192 0 0 0 0 0", "m.b33 == 1) m.c12 = Constraint(expr= m.b34 + m.b35 + m.b36 == 1)", "== 1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1) m.c28", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 =", "- 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132", "Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 =", "s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint", "- 24145*m.b26*m.b50 + 
47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51 - 122136*m.b28*m.b31 - 77871*m.b28*m.b52 -", "- 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114", "0 0 # # Variable counts # x b i s1s s2s sc", "m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16", "+ m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 ==", "8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 +", "# 65 65 0 0 0 0 0 0 # # Variable counts", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178", "m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21 = Constraint(expr=", "- 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47", "76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 - 54058*m.b132*m.b156 -", "m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87", "+ m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 ==", "61805*m.b106* 
m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 - 22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 +", "+ 47044*m.b151*m.b175 - 12091*m.b152*m.b155 + 47044*m.b152*m.b176 - 12091*m.b153*m.b156 + 47044* m.b153*m.b177 - 64916*m.b154*m.b157", "68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9* m.b177", "== 1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19", "1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1) m.c28 =", "+ 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39 + 18064*m.b36*m.b60 - 164293*m.b37*m.b40 -", "192 0 0 0 0 0 # FX 0 0 0 0 0", "Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)", "= Constraint(expr= m.b142 + m.b143 + m.b144 == 1) m.c49 = Constraint(expr= m.b145", "== 1) m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 == 1) m.c41", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 =", "- 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49", "m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23", "+ m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19 + m.b20 +", "m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 =", "- 
36871* m.b49*m.b73 + 73006*m.b50*m.b53 - 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72", "m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188", "m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37", "+ 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)", "1) m.c42 = Constraint(expr= m.b124 + m.b125 + m.b126 == 1) m.c43 =", "19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184 - 22331*m.b161*m.b164 - 32557*m.b161*m.b185 -", "162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 =", "m.b177 + 173612*m.b10*m.b13 + 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179", "67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 -", "m.c24 = 
Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25 = Constraint(expr=", "67634*m.b3*m.b24 + 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8", "21093*m.b17*m.b185 + 91667*m.b18*m.b21 + 153955*m.b18*m.b42 - 21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 -", "264072*m.b83*m.b107 - 61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 +", "+ m.b96 == 1) m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 ==", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)", "m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4", "+ 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 =", "== 1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3", "+ m.b102 == 1) m.c35 = Constraint(expr= m.b103 + m.b104 + m.b105 ==", "22047*m.b107*m.b131 - 61805*m.b108*m.b111 - 22047*m.b108*m.b132 + 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716*", "+ m.b177 == 1) m.c60 = Constraint(expr= m.b178 + m.b179 + m.b180 ==", "- 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96 
- 64588*m.b93*m.b117 +", "*m.b167*m.b191 - 75908*m.b168*m.b192 - 75258*m.b169*m.b172 + 15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191 -", "- 58023*m.b79*m.b103 + 70821*m.b80*m.b83 - 58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85", "33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187", "37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172 + 211004*m.b149* m.b152 - 65416*m.b149*m.b173 + 211004*m.b150*m.b153 -", "+ m.b129 == 1) m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 ==", "m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23 = Constraint(expr=", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 =", "+ 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173 + 127500*m.b6*m.b9 +", "+ 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93 + 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115", "m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47", "m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 -", "m.b40 + m.b41 + m.b42 == 1) m.c15 = Constraint(expr= m.b43 + m.b44", "m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6", "m.b148 + m.b149 + m.b150 == 1) m.c51 = Constraint(expr= m.b151 + m.b152", "61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 - 83602*m.b3*m.b6", "38420*m.b98*m.b122 - 
8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 + 22308*m.b100*m.b103 + 177432*m.b100*m.b124 +", "1) m.c4 = Constraint(expr= m.b10 + m.b11 + m.b12 == 1) m.c5 =", "= Constraint(expr= m.b136 + m.b137 + m.b138 == 1) m.c47 = Constraint(expr= m.b139", "162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48", "= Constraint(expr= m.b181 + m.b182 + m.b183 == 1) m.c62 = Constraint(expr= m.b184", "22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 -", "m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67", "m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63", "Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= m.b148 + m.b149 + m.b150 == 1) m.c51 = Constraint(expr= m.b151 +", "+ m.b44 + m.b45 == 1) m.c16 = Constraint(expr= m.b46 + m.b47 +", "+ 1787*m.b123*m.b126 - 39963*m.b123*m.b144 - 49240* m.b123*m.b147 - 19759*m.b124*m.b127 - 51266*m.b124*m.b148 - 19759*m.b125*m.b128", "- 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97", "1) m.c53 = Constraint(expr= m.b157 + m.b158 + m.b159 == 1) m.c54 =", "+ m.b99 == 1) m.c34 = Constraint(expr= m.b100 + m.b101 + m.b102 ==", "Nonzero counts # Total const NL DLL # 385 193 192 0 #", "114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 
218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 - 156795*m.b128*m.b131 - 90008*m.b128*m.b152 -", "m.b147 == 1) m.c50 = Constraint(expr= m.b148 + m.b149 + m.b150 == 1)", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 =", "+ 74165*m.b20*m.b23 - 220722*m.b20*m.b44 - 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189", "Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b124 + m.b125 + m.b126 == 1) m.c43 = Constraint(expr= m.b127 + m.b128", "m.b150 == 1) m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153 == 1)", "92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 + 159379*m.b89*m.b92 + 204734*m.b89*m.b113 + 159379*m.b90*m.b93", "m.b91 + m.b92 + m.b93 == 1) m.c32 = Constraint(expr= m.b94 + m.b95", "- 162288*m.b20*m.b188 + 74165*m.b21*m.b24 - 220722*m.b21*m.b45 - 162288*m.b21* m.b189 + 35287*m.b22*m.b46 - 73662*m.b22*m.b190", "8447*m.b97*m.b100 + 90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102", "+ 61711*m.b3*m.b27 - 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 +", "+ 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 + 2925*m.b25*m.b46 - 
24145*m.b25*m.b49 + 47953*m.b26*m.b29", "m.b14 + m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18", "# Reformulation has removed 1 variable and 1 equation from pyomo.environ import *", "1) m.c15 = Constraint(expr= m.b43 + m.b44 + m.b45 == 1) m.c16 =", "73662*m.b22*m.b190 + 35287*m.b23*m.b47 - 73662*m.b23*m.b191 + 35287* m.b24*m.b48 - 73662*m.b24*m.b192 + 47953*m.b25*m.b28 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 =", "m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171", "m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168", "43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144", "0 0 # # Nonzero counts # Total const NL DLL # 385", "Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)", 
"m.b6 == 1) m.c3 = Constraint(expr= m.b7 + m.b8 + m.b9 == 1)", "+ 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26 - 59956*m.b2*m.b170 -", "== 1) m.c8 = Constraint(expr= m.b22 + m.b23 + m.b24 == 1) m.c9", "- 68458*m.b8* m.b11 - 22985*m.b8*m.b32 - 35743*m.b8*m.b176 - 68458*m.b9*m.b12 - 22985*m.b9*m.b33 - 35743*m.b9*", "scont sint # 193 1 192 0 0 0 0 0 # FX", "m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42", "m.c11 = Constraint(expr= m.b31 + m.b32 + m.b33 == 1) m.c12 = Constraint(expr=", "m.b52 + m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56", "92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 +", "- 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 -", "+ m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31 + m.b32 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 =", "Constraint(expr= m.b28 + m.b29 + m.b30 == 1) m.c11 = Constraint(expr= m.b31 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 =", "+ 199680*m.b10*m.b34 + 92582*m.b10*m.b178 + 173612*m.b11*m.b14 + 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 +", "275957*m.b133*m.b157 - 20555*m.b134* 
m.b137 - 275957*m.b134*m.b158 - 20555*m.b135*m.b138 - 275957*m.b135*m.b159 + 17070*m.b136*m.b139 -", "90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135 -", "= Constraint(expr= m.b52 + m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55", "Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b48 == 1) m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1)", "+ 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91", "+ m.b101 + m.b102 == 1) m.c35 = Constraint(expr= m.b103 + m.b104 +", "sc si # Total cont binary integer sos1 sos2 scont sint # 193", "91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41 - 21093*m.b17*m.b185 + 91667*m.b18*m.b21", "m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184", "- 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60", "+ m.b116 + m.b117 == 1) m.c40 = Constraint(expr= m.b118 + m.b119 +", "m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80 + m.b81 == 1)", "- 164293*m.b37*m.b40 - 62562*m.b37*m.b61 - 164293*m.b38*m.b41 - 62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63", "m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50", "1) m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 == 1) m.c41 =", "- 21093*m.b18* m.b186 + 
74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44", "18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99 - 35802*m.b76*m.b79 - 95280*m.b76*m.b100 - 35802*m.b77*m.b80 -", "m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56", "m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121", "7440* m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84", "169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 -", "189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120 - 8447*m.b97*m.b100 +", "- 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 - 85264*m.b165*m.b189 - 75908*m.b166*m.b190 - 75908 *m.b167*m.b191", "1) m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 == 1) m.c31 =", "== 1) m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153 == 1) m.c52", "+ m.b171 == 1) m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 ==", "+ 43200*m.b115*m.b139 + 87321*m.b116*m.b119 + 43200*m.b116*m.b140 + 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142", "+ m.b53 + m.b54 == 1) m.c19 = Constraint(expr= m.b55 + m.b56 +", "129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 - 44654*m.b36*m.b39", "28668*m.b104*m.b128 - 14134*m.b105*m.b108 - 28668*m.b105*m.b129 - 61805*m.b106* m.b109 - 22047*m.b106*m.b130 - 61805*m.b107*m.b110 -", "1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1) m.c3 =", "- 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* 
m.b147*m.b168 - 37043*m.b147*m.b171 + 211004*m.b148*m.b151 - 65416*m.b148*m.b172", "m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45", "m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 - 3896*m.b142*m.b166 - 3896*m.b143* m.b167 - 3896*m.b144*m.b168 -", "33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 =", "m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27", "m.b68 + m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71 + m.b72", "Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b89 + m.b90 == 1) m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93", "= Constraint(expr= m.b70 + m.b71 + m.b72 == 1) m.c25 = Constraint(expr= m.b73", "59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 - 110030*m.b5*m.b173", "- 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652* m.b75*m.b78 + 114918*m.b75*m.b96 - 6803*m.b75*m.b99", "- 158531*m.b154*m.b178 - 64916*m.b155*m.b158 
- 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160", "= Constraint(expr= m.b133 + m.b134 + m.b135 == 1) m.c46 = Constraint(expr= m.b136", "+ m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15 ==", "Constraint(expr= m.b178 + m.b179 + m.b180 == 1) m.c61 = Constraint(expr= m.b181 +", "m.b51 == 1) m.c18 = Constraint(expr= m.b52 + m.b53 + m.b54 == 1)", "+ 199680*m.b11*m.b35 + 92582*m.b11*m.b179 + 173612*m.b12*m.b15 + 199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16", "0 0 0 # FX 0 0 0 0 0 0 0 0", "m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77", "m.b97 + m.b98 + m.b99 == 1) m.c34 = Constraint(expr= m.b100 + m.b101", "1) m.c41 = Constraint(expr= m.b121 + m.b122 + m.b123 == 1) m.c42 =", "+ m.b128 + m.b129 == 1) m.c44 = Constraint(expr= m.b130 + m.b131 +", "# Total const NL DLL # 385 193 192 0 # # Reformulation", "pyomo.environ import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 =", "+ m.b110 + m.b111 == 1) m.c38 = Constraint(expr= m.b112 + m.b113 +", "1) m.c31 = Constraint(expr= m.b91 + m.b92 + m.b93 == 1) m.c32 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) 
m.obj", "1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ 114918*m.b73* m.b94 - 6803*m.b73*m.b97 - 18652*m.b74*m.b77 + 114918*m.b74*m.b95 - 6803*m.b74*m.b98 - 18652*", "m.b15 == 1) m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1)", "m.b139 + m.b140 + m.b141 == 1) m.c48 = Constraint(expr= m.b142 + m.b143", "- 51266*m.b124*m.b148 - 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 =", "29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 -", "m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60", "+ 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 + 59421*m.b184*m.b187 + 59421*m.b185*m.b188 + 59421*m.b186*m.b189 -", "+ 204734* m.b90*m.b114 - 189099*m.b91*m.b94 - 64588*m.b91*m.b115 - 189099*m.b92*m.b95 - 64588*m.b92*m.b116 - 189099*m.b93*m.b96", "m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133", "1) m.c22 = Constraint(expr= m.b64 + m.b65 + m.b66 == 1) m.c23 =", "- 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185", "114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 54754*m.b68*m.b92 -", "24145*m.b27*m.b51 - 
122136*m.b28*m.b31 - 77871*m.b28*m.b52 - 122136*m.b29*m.b32 - 77871*m.b29* m.b53 - 122136*m.b30*m.b33 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 =", "37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150 + 45364* m.b147*m.b168 -", "+ m.b143 + m.b144 == 1) m.c49 = Constraint(expr= m.b145 + m.b146 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 =", "164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65", "- 95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82", "75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 -", "Constraint(expr= m.b94 + m.b95 + m.b96 == 1) m.c33 = Constraint(expr= m.b97 +", "- 59956*m.b3*m.b171 + 127500*m.b4*m.b7 + 35260*m.b4*m.b28 - 110030*m.b4*m.b172 + 127500*m.b5*m.b8 + 35260*m.b5*m.b29 -", "+ 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52 - 97425*m.b49*m.b70 - 36871* m.b49*m.b73 + 73006*m.b50*m.b53", "162791*m.b139*m.b142 - 8148*m.b139*m.b163 - 162791*m.b140*m.b143 - 8148* m.b140*m.b164 - 162791*m.b141*m.b144 - 8148*m.b141*m.b165 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 =", "- 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 + 56108*m.b113*m.b137 - 189188*m.b114*m.b117", 
"Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c44 = Constraint(expr= m.b130 + m.b131 + m.b132 == 1) m.c45", "m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137", "Constraint(expr= m.b163 + m.b164 + m.b165 == 1) m.c56 = Constraint(expr= m.b166 +", "+ 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90", "== 1) m.c7 = Constraint(expr= m.b19 + m.b20 + m.b21 == 1) m.c8", "+ 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 + 97476*m.b63*m.b66 - 234690*m.b63*m.b87 +", "+ 87321* m.b117*m.b120 + 43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 +", "m.c58 = Constraint(expr= m.b172 + m.b173 + m.b174 == 1) m.c59 = Constraint(expr=", "61946*m.b84*m.b87 - 264072*m.b84*m.b108 - 92130*m.b85*m.b88 + 16108*m.b85*m.b109 - 92130*m.b86 *m.b89 + 16108*m.b86*m.b110 -", "+ m.b117 == 1) m.c40 = Constraint(expr= m.b118 + m.b119 + m.b120 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b60 == 1) m.c21 = Constraint(expr= m.b61 + m.b62 + m.b63 == 1)", "+ m.b176 + m.b177 == 1) m.c60 = Constraint(expr= m.b178 + m.b179 +", "m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74", 
"63550*m.b53*m.b77 - 85230*m.b54*m.b57 - 63550*m.b54*m.b78 - 153638*m.b55*m.b58 + 84496*m.b55* m.b79 - 153638*m.b56*m.b59 +", "+ 15236*m.b169*m.b190 - 75258*m.b170* m.b173 + 15236*m.b170*m.b191 - 75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 =", "m.b67 + m.b68 + m.b69 == 1) m.c24 = Constraint(expr= m.b70 + m.b71", "+ m.b150 == 1) m.c51 = Constraint(expr= m.b151 + m.b152 + m.b153 ==", "218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754* m.b67*m.b91 - 72968*m.b68*m.b71 +", "+ m.b189 == 1) m.c64 = Constraint(expr= m.b190 + m.b191 + m.b192 ==", "m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59 + m.b60 == 1)", "m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66", "+ 97476*m.b63*m.b66 - 234690*m.b63*m.b87 + 114707*m.b64*m.b67 + 218718*m.b64*m.b88 + 114707*m.b65* m.b68 + 218718*m.b65*m.b89", "m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116 +", "+ 29936*m.b109*m.b112 - 36716*m.b109*m.b133 + 29936*m.b110*m.b113 - 36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135", "90736*m.b97*m.b118 + 38420*m.b97*m.b121 - 8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120", "m.b167 + m.b168 == 1) m.c57 = Constraint(expr= m.b169 + m.b170 + m.b171", "+ m.b87 == 1) m.c30 = Constraint(expr= m.b88 + m.b89 + m.b90 ==", "Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38", "m.b11 + m.b12 == 1) m.c5 = Constraint(expr= m.b13 + m.b14 + m.b15", "Constraint(expr= m.b127 + m.b128 + m.b129 == 1) m.c44 = Constraint(expr= m.b130 +", "== 1) m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20", "43200*m.b117*m.b141 - 105343*m.b118*m.b142 - 105343*m.b119*m.b143 - 105343*m.b120 *m.b144 + 1787*m.b121*m.b124 - 39963*m.b121*m.b142 -", "3058*m.b175*m.b178 - 3058*m.b176*m.b179 - 3058*m.b177 *m.b180 + 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 +", "+ 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 +", "m.b53 - 122136*m.b30*m.b33 - 77871*m.b30*m.b54 - 129158*m.b31*m.b34 - 45165*m.b31*m.b55 - 129158* m.b32*m.b35 -", "= Constraint(expr= m.b103 + m.b104 + m.b105 == 1) m.c36 = Constraint(expr= m.b106", "+ 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 -", "m.b107 + m.b108 == 1) m.c37 = Constraint(expr= m.b109 + m.b110 + m.b111", "0 0 0 0 0 0 # # Nonzero counts # Total const", "Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)", "- 129158*m.b33*m.b36 - 45165*m.b33*m.b57 - 44654*m.b34*m.b37 + 18064*m.b34*m.b58 - 44654*m.b35*m.b38 + 18064*m.b35*m.b59 -", "153638*m.b56*m.b59 + 84496*m.b56*m.b80 - 153638*m.b57*m.b60 + 84496*m.b57*m.b81 + 7440* m.b58*m.b61 - 67520*m.b58*m.b82 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c26 = Constraint(expr= m.b76 + m.b77 + m.b78 == 1) m.c27", "Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)", "58023*m.b80*m.b104 + 70821*m.b81*m.b84 - 58023* m.b81*m.b105 - 61946*m.b82*m.b85 - 264072*m.b82*m.b106 - 61946*m.b83*m.b86 -", "8447*m.b98*m.b101 + 90736*m.b98*m.b119 + 38420*m.b98*m.b122 - 8447*m.b99*m.b102 + 90736*m.b99*m.b120 + 38420*m.b99* m.b123 +", "- 90008*m.b129*m.b153 + 76764*m.b130*m.b133 - 54058*m.b130*m.b154 + 76764*m.b131*m.b134 - 54058*m.b131*m.b155 + 76764* m.b132*m.b135", "Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)", "Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5 + 67634*m.b2*m.b23 + 61711*m.b2*m.b26", "67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)", "m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20", "54754* m.b67*m.b91 - 72968*m.b68*m.b71 + 
54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 -", "62562*m.b38*m.b62 - 164293*m.b39 *m.b42 - 62562*m.b39*m.b63 + 15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 -", "+ 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 + 77518*m.b47*m.b71 + 77518*m.b48*m.b72 + 73006*m.b49*m.b52", "m.b66 == 1) m.c23 = Constraint(expr= m.b67 + m.b68 + m.b69 == 1)", "32557*m.b161*m.b185 - 22331*m.b162* m.b165 - 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)", "36716* m.b110*m.b134 + 29936*m.b111*m.b114 - 36716*m.b111*m.b135 - 189188*m.b112*m.b115 + 56108*m.b112* m.b136 - 189188*m.b113*m.b116", "m.b132*m.b135 - 54058*m.b132*m.b156 - 20555*m.b133*m.b136 - 275957*m.b133*m.b157 - 20555*m.b134* m.b137 - 275957*m.b134*m.b158 -", "m.b58*m.b61 - 67520*m.b58*m.b82 + 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 +", "= Var(within=Binary,bounds=(0,1),initialize=0) m.obj = Objective(expr=67634*m.b1*m.b22 - 83602*m.b1*m.b4 + 61711*m.b1*m.b25 - 59956*m.b1*m.b169 - 83602*m.b2*m.b5", "7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85 + 97476*m.b62*m.b65 - 234690*m.b62*m.b86 +", "19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 90008*m.b127*m.b151 -", "+ 130308*m.b15 *m.b183 + 91667*m.b16*m.b19 + 153955*m.b16*m.b40 - 21093*m.b16*m.b184 + 91667*m.b17*m.b20 + 153955*m.b17*m.b41", "m.c6 = Constraint(expr= m.b16 + m.b17 + m.b18 == 1) m.c7 = Constraint(expr=", "m.b61 + m.b62 + m.b63 == 1) m.c22 = Constraint(expr= m.b64 + m.b65", "m.b167 - 3896*m.b144*m.b168 - 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 
105352*m.b146*m.b149 + 45364*m.b146*m.b167", "92130*m.b86 *m.b89 + 16108*m.b86*m.b110 - 92130*m.b87*m.b90 + 16108*m.b87*m.b111 + 159379*m.b88*m.b91 + 204734*m.b88*m.b112 +", "m.b182 + m.b183 == 1) m.c62 = Constraint(expr= m.b184 + m.b185 + m.b186", "i s1s s2s sc si # Total cont binary integer sos1 sos2 scont", "Constraint(expr= m.b139 + m.b140 + m.b141 == 1) m.c48 = Constraint(expr= m.b142 +", "+ m.b77 + m.b78 == 1) m.c27 = Constraint(expr= m.b79 + m.b80 +", "105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 - 105352*m.b147*m.b150", "+ 7440*m.b59*m.b62 - 67520*m.b59*m.b83 + 7440*m.b60*m.b63 - 67520 *m.b60*m.b84 + 97476*m.b61*m.b64 - 234690*m.b61*m.b85", "24145*m.b25*m.b49 + 47953*m.b26*m.b29 + 2925*m.b26*m.b47 - 24145*m.b26*m.b50 + 47953*m.b27*m.b30 + 2925*m.b27*m.b48 - 24145*m.b27*m.b51", "Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)", "47044* m.b153*m.b177 - 64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159", "73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 - 63550*m.b52*m.b76 - 85230*m.b53*m.b56 - 63550*m.b53*m.b77", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 =", "Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15 = Constraint(expr= m.b43 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= 
m.b16 + m.b17 + m.b18 == 1) m.c7 = Constraint(expr= m.b19 +", "Constraint(expr= m.b88 + m.b89 + m.b90 == 1) m.c31 = Constraint(expr= m.b91 +", "- 97425*m.b50*m.b71 - 36871*m.b50*m.b74 + 73006*m.b51*m.b54 - 97425*m.b51*m.b72 - 36871*m.b51*m.b75 - 85230*m.b52*m.b55 -", "54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 - 169837*m.b72*m.b96 - 18652*m.b73*m.b76 + 114918*m.b73* m.b94 -", "22308*m.b100*m.b103 + 177432*m.b100*m.b124 + 22308*m.b101*m.b104 + 177432*m.b101*m.b125 + 22308*m.b102*m.b105 + 177432*m.b102*m.b126 - 14134*m.b103*m.b106", "Constraint(expr= m.b130 + m.b131 + m.b132 == 1) m.c45 = Constraint(expr= m.b133 +", "- 19908*m.b158*m.b161 + 66609*m.b158*m.b182 - 19908*m.b159*m.b162 + 66609*m.b159*m.b183 - 22331* m.b160*m.b163 - 32557*m.b160*m.b184", "277077*m.b187*m.b190 - 277077*m.b188*m.b191 - 277077*m.b189*m.b192 , sense=minimize) m.c1 = Constraint(expr= m.b1 + m.b2", "+ 15254*m.b42*m.b45 - 73788*m.b42*m.b66 + 67357*m.b43*m.b46 + 145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 +", "+ 114707*m.b65* m.b68 + 218718*m.b65*m.b89 + 114707*m.b66*m.b69 + 218718*m.b66*m.b90 - 72968*m.b67*m.b70 + 54754*", "m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15 = Constraint(expr=", "65 65 0 0 0 0 0 0 # # Variable counts #", "m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99", "m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170", "95280*m.b76*m.b100 - 35802*m.b77*m.b80 - 95280*m.b77*m.b101 - 35802*m.b78*m.b81 - 95280*m.b78*m.b102 + 70821*m.b79* m.b82 -", "Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 
= Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)", "Constraint(expr= m.b169 + m.b170 + m.b171 == 1) m.c58 = Constraint(expr= m.b172 +", "Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)", "== 1) m.c14 = Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 =", "75258*m.b171*m.b174 + 15236*m.b171*m.b192 - 72030*m.b172*m.b175 - 72030*m.b173*m.b176 - 72030*m.b174*m.b177 - 3058*m.b175*m.b178 - 3058*m.b176*m.b179", "m.c17 = Constraint(expr= m.b49 + m.b50 + m.b51 == 1) m.c18 = Constraint(expr=", "Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)", "199680*m.b12*m.b36 + 92582*m.b12* m.b180 + 117135*m.b13*m.b16 - 147716*m.b13*m.b37 + 130308*m.b13*m.b181 + 117135*m.b14*m.b17 -", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 =", "m.b79 + m.b80 + m.b81 == 1) m.c28 = Constraint(expr= m.b82 + m.b83", "- 19759*m.b125*m.b128 - 51266*m.b125* m.b149 - 19759*m.b126*m.b129 - 51266*m.b126*m.b150 - 156795*m.b127*m.b130 - 
90008*m.b127*m.b151", "- 64588*m.b92*m.b116 - 189099*m.b93*m.b96 - 64588*m.b93*m.b117 + 130590*m.b94*m.b118 + 130590*m.b95*m.b119 + 130590* m.b96*m.b120", "1) m.c29 = Constraint(expr= m.b85 + m.b86 + m.b87 == 1) m.c30 =", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 =", "158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 - 19908*m.b157*m.b160 +", "m.c19 = Constraint(expr= m.b55 + m.b56 + m.b57 == 1) m.c20 = Constraint(expr=", "21093*m.b18* m.b186 + 74165*m.b19*m.b22 - 220722*m.b19*m.b43 - 162288*m.b19*m.b187 + 74165*m.b20*m.b23 - 220722*m.b20*m.b44 -", "m.b3 == 1) m.c2 = Constraint(expr= m.b4 + m.b5 + m.b6 == 1)", "145724*m.b43*m.b67 + 67357*m.b44*m.b47 + 145724*m.b44*m.b68 + 67357*m.b45*m.b48 + 145724*m.b45*m.b69 + 77518*m.b46* m.b70 +", "- 105352*m.b145*m.b148 + 45364*m.b145*m.b166 - 37043*m.b145*m.b169 - 105352*m.b146*m.b149 + 45364*m.b146*m.b167 - 37043*m.b146*m.b170 -", "== 1) m.c47 = Constraint(expr= m.b139 + m.b140 + m.b141 == 1) m.c48", "+ 33988*m.b178*m.b181 + 33988*m.b179*m.b182 + 33988*m.b180*m.b183 + 116509*m.b181*m.b184 + 116509*m.b182*m.b185 + 116509*m.b183*m.b186 +", "1) m.c33 = Constraint(expr= m.b97 + m.b98 + m.b99 == 1) m.c34 =", "64916*m.b154*m.b157 - 158531*m.b154*m.b178 - 64916*m.b155*m.b158 - 158531*m.b155* m.b179 - 64916*m.b156*m.b159 - 158531*m.b156*m.b180 -", "15254*m.b40*m.b43 - 73788*m.b40*m.b64 + 15254*m.b41*m.b44 - 73788* m.b41*m.b65 + 15254*m.b42*m.b45 - 73788*m.b42*m.b66 +", "m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165", "Constraint(expr= m.b58 + m.b59 + m.b60 == 1) m.c21 = Constraint(expr= m.b61 +", "== 1) m.c4 = Constraint(expr= 
m.b10 + m.b11 + m.b12 == 1) m.c5", "# Total E G L N X C B # 65 65 0", "= Constraint(expr= m.b118 + m.b119 + m.b120 == 1) m.c41 = Constraint(expr= m.b121", "+ m.b56 + m.b57 == 1) m.c20 = Constraint(expr= m.b58 + m.b59 +", "m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177", "m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151", "- 72968*m.b68*m.b71 + 54754*m.b68*m.b92 - 72968*m.b69*m.b72 + 54754*m.b69*m.b93 - 169837*m.b70*m.b94 - 169837*m.b71*m.b95 -", "- 32557*m.b162*m.b186 - 218808*m.b163*m.b166 - 85264*m.b163*m.b187 - 218808*m.b164*m.b167 - 85264*m.b164*m.b188 - 218808*m.b165*m.b168 -", "= Constraint(expr= m.b40 + m.b41 + m.b42 == 1) m.c15 = Constraint(expr= m.b43", "binary integer sos1 sos2 scont sint # 193 1 192 0 0 0", "= Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 =", "Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)", "+ m.b167 + m.b168 == 1) m.c57 = Constraint(expr= m.b169 + m.b170 +", "m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140", "m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28" ]
[ "pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import", "@pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def", "return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from zeebe_grpc.gateway_pb2_grpc import", "import patch, MagicMock from uuid import uuid4 import pytest from pyzeebe import ZeebeClient,", "job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel):", "\"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter()", "ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter", "pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock", "def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker,", "unittest.mock import patch, MagicMock from uuid import uuid4 import pytest from pyzeebe import", "job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\")", "random_job @pytest.fixture def 
job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def", "x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture", "Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler", "zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type): return", "return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type():", "worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type,", "str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def handle_task_mock():", "return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y,", "patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock:", "@pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return", "routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return", "Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type(): return", "task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job: return job", "-> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def 
grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server", "MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer():", "def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in range(0,", "def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job: return", "def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from zeebe_grpc.gateway_pb2_grpc import GatewayStub return GatewayStub", "randint from threading import Event from unittest.mock import patch, MagicMock from uuid import", "randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job)", "worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)),", "dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel())", "import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from", "for _ in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def", "job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) 
@pytest.fixture def", "from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter)", "= ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda", "MagicMock from uuid import uuid4 import pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter,", "random import randint from threading import Event from unittest.mock import patch, MagicMock from", "@pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task):", "simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc", "\"stop_event\") as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield", "from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from", "z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event()", "@pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from zeebe_grpc.gateway_pb2_grpc import GatewayStub return", "spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def", "random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables", "decorator(): def 
simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server():", "Event from unittest.mock import patch, MagicMock from uuid import uuid4 import pytest from", "import pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter", "tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job()", "import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def", "yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for", "ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock", "return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker", "@pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in", "range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job:", "worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z:", "def decorator(): def simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def", "dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def", "from uuid import uuid4 import pytest from 
pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job", "@pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter):", "def simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from", "task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture", "100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) ->", "ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import", "handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\")", "mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture def", "def routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture def task_handler():", "x, y, z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test", "job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4())) return", "as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy", "from unittest.mock import patch, MagicMock from uuid import uuid4 import pytest from pyzeebe", "stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def 
handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock:", "add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from zeebe_grpc.gateway_pb2_grpc", "ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture", "ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from", "def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture", "return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))]", "y, z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test =", "return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server", "task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def", "pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def", "ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture", "patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\")", "mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = 
mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture", "zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel):", "from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture", "zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x,", "@pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker):", "@pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\")", "from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from", "random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture", "in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def", "= dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return", "mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock", "stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy =", "pyzeebe.task.task import Task from 
pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils", "@pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with", "return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job: return job return", "ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x:", "return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def", "import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter):", "def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter()", "import randint from threading import Event from unittest.mock import patch, MagicMock from uuid", "job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job =", "def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker", "mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return", "from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task", "tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import 
random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture", "import Event from unittest.mock import patch, MagicMock from uuid import uuid4 import pytest", "as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield", "yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture", "def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4()))", "@pytest.fixture def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def", "patch, MagicMock from uuid import uuid4 import pytest from pyzeebe import ZeebeClient, ZeebeWorker,", "pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from", "with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as", "MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type(): return str(uuid4())", "MagicMock(wraps=lambda x, y, z: x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test():", "ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job: return job return MagicMock(wraps=simple_decorator)", "return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler()", "return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) 
@pytest.fixture", "import uuid4 import pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter", "@pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return", "import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import", "from threading import Event from unittest.mock import patch, MagicMock from uuid import uuid4", "ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task from pyzeebe.worker.task_handler", "uuid import uuid4 import pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from", "random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def", "Job) -> Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import", "= mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers():", "def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel):", "_ in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator():", "yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def", "import Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import 
GatewayMock from tests.unit.utils.random_utils import", "router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2,", "def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter", "stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as", "mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router():", "spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture def routers(): return [ZeebeTaskRouter() for _", "@pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda x, y, z: x))", "def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test stop_test.set()", "@pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables =", "= zeebe_adapter return worker @pytest.fixture def task(task_type): return Task(task_type, MagicMock(wraps=lambda x: dict(x=x)), MagicMock(wraps=lambda", "def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job", "zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return", "uuid4 import pytest from pyzeebe import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import", "job_from_task(task): job = 
random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return", "handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker, \"_handle_not_alive_thread\") yield spy @pytest.fixture def router(): return ZeebeTaskRouter() @pytest.fixture", "GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter():", "[ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture", "def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock()", "@pytest.fixture def task_handler(): return ZeebeTaskHandler() @pytest.fixture def decorator(): def simple_decorator(job: Job) -> Job:", "grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\")", "stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker):", "zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker =", "return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def", "Event() yield stop_test stop_test.set() 
@pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock", "return random_job() @pytest.fixture def job_from_task(task): job = random_job(task) job.variables = dict(x=str(uuid4())) return job", "def stop_after_test(): stop_test = Event() yield stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\")", "def stop_event_mock(zeebe_worker): with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture def handle_not_alive_thread_spy(mocker): spy", "@pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def stop_event_mock(zeebe_worker): with", "@pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield stop_test", "ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return", "return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture def job_from_task(task): job = random_job(task)", "add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def grpc_stub_cls(grpc_channel): from zeebe_grpc.gateway_pb2_grpc import GatewayStub", "from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return add_GatewayServicer_to_server @pytest.fixture(scope=\"module\") def grpc_servicer(): return GatewayMock() @pytest.fixture(scope=\"module\") def", "threading import Event from unittest.mock import patch, MagicMock from uuid import uuid4 import", "from tests.unit.utils.random_utils import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) 
@pytest.fixture def job_without_adapter(): return", "def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter = zeebe_adapter return worker @pytest.fixture def task(task_type):", "Task from pyzeebe.worker.task_handler import ZeebeTaskHandler from tests.unit.utils.gateway_mock import GatewayMock from tests.unit.utils.random_utils import random_job", "@pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker() worker.zeebe_adapter =", "from random import randint from threading import Event from unittest.mock import patch, MagicMock", "job = random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel())", "yield stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture", "x)) @pytest.fixture def task_type(): return str(uuid4()) @pytest.fixture def stop_after_test(): stop_test = Event() yield", "return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture def zeebe_client(grpc_create_channel): return ZeebeClient(channel=grpc_create_channel()) @pytest.fixture def zeebe_worker(zeebe_adapter): worker = ZeebeWorker()", "import random_job @pytest.fixture def job_with_adapter(zeebe_adapter): return random_job(zeebe_adapter=zeebe_adapter) @pytest.fixture def job_without_adapter(): return random_job() @pytest.fixture", "stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield mock @pytest.fixture def", "import ZeebeClient, ZeebeWorker, ZeebeTaskRouter, Job from pyzeebe.grpc_internals.zeebe_adapter import ZeebeAdapter from pyzeebe.task.task import Task", "with patch.object(zeebe_worker, \"stop_event\") as mock: yield mock @pytest.fixture 
def handle_not_alive_thread_spy(mocker): spy = mocker.spy(ZeebeWorker,", "Job: return job return MagicMock(wraps=simple_decorator) @pytest.fixture(scope=\"module\") def grpc_add_to_server(): from zeebe_grpc.gateway_pb2_grpc import add_GatewayServicer_to_server return", "@pytest.fixture def routers(): return [ZeebeTaskRouter() for _ in range(0, randint(2, 100))] @pytest.fixture def", "= Event() yield stop_test stop_test.set() @pytest.fixture def handle_task_mock(): with patch(\"pyzeebe.worker.worker.ZeebeWorker._handle_task\") as mock: yield", "= random_job(task) job.variables = dict(x=str(uuid4())) return job @pytest.fixture def zeebe_adapter(grpc_create_channel): return ZeebeAdapter(channel=grpc_create_channel()) @pytest.fixture" ]
[ "below dataset contains 6 unique words numbered 0-5. Ideally the word vector for", "0.7985747 ] # [ 0.57897186]] # # [[-0.90803576] # [ 0.75451994] # [", "5 indexed words should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for", "The row is same as a sentence, with words replaced # by its", "weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding layer embed", "0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has a dimension of (2,5,3,1) #", "np # training set. Contains a row of size 5 per train example.", "0.6548476 ] # [ 0.00760126]] # # [[-0.7074845 ] # [ 0.5100081 ]", "# [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721", "np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by", "vector by randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) #", "a convolution 2d operations on top the expanded single channel embedded vectors embedded_chars_expanded", "dataset contains 6 unique words numbered 0-5. Ideally the word vector for #", "y_train = np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding", "# 4 and 5 indexed words should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) #", "(2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[", "[ 0.7002065 ]] # # [[ 0.2708087 ] # [ 0.7985747 ] #", "] # [ 0.57897186]] # # [[-0.90803576] # [ 0.75451994] # [ 0.8864901", "0.57897186]] # # [[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]] # #", "by randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create", "The below dataset contains 6 unique words numbered 0-5. 
Ideally the word vector", "2d operations on top the expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed,", "] # [ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] # [", "of size 5 per train example. The row is same as a sentence,", "= tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding layer embed = tf.nn.embedding_lookup(embedding,", "Ideally the word vector for # 4 and 5 indexed words should be", "contains 6 unique words numbered 0-5. Ideally the word vector for # 4", "embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly distributing the weights.", "0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746]", "Contains a row of size 5 per train example. The row is same", "for # 4 and 5 indexed words should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]])", "be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train =", "per train example. The row is same as a sentence, with words replaced", "[ 0.57897186]] # # [[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]] #", "we can apply a convolution 2d operations on top the expanded single channel", "0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593]", "-0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [", "words should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose", "= sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result # [[[ 0.89598155 0.4275496", "# [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded -", "[-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]]", "indexed words should be same. 
X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing", "-1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) #", "vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]);", "[ 0.5100081 ] # [ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933]", "testing purpose y_train = np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze", "0.7002065 ]] # # [[ 0.2708087 ] # [ 0.7985747 ] # [", "row is same as a sentence, with words replaced # by its equivalent", "0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792", "the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that we can apply", "[-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has", "layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that we can apply a convolution", "# [[ 0.6642673 ] # [ 0.6548476 ] # [ 0.00760126]] # #", "= np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train = np.array([0,1]) # Create", "[ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] #", "same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train = np.array([0,1])", "# [ 0.7985747 ] # [ 0.57897186]] # # [[-0.90803576] # [ 0.75451994]", "equivalent unique index. The below dataset contains 6 unique words numbered 0-5. 
Ideally", "# # # [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # #", "# [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436", "convolution 2d operations on top the expanded single channel embedded vectors embedded_chars_expanded =", "X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train = np.array([0,1]) #", "X_train) # So that we can apply a convolution 2d operations on top", "-0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded", "[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876]", "]]] # result_expanded - has a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638", "= tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result)", "# [-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087 ] #", "0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955", "] # [ 0.00760126]] # # [[-0.7074845 ] # [ 0.5100081 ] #", "[[ 0.6642673 ] # [ 0.6548476 ] # [ 0.00760126]] # # [[-0.7074845", "0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] # # [[-0.90803576] #", "purpose y_train = np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the", "]] # # [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]] # #", "-0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003", "# [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979", "# # [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883 ]] #", "# [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883 ]] # #", "0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has a", "is same as a sentence, with words 
replaced # by its equivalent unique", "[-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155", "# # [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[", "that we can apply a convolution 2d operations on top the expanded single", "0.57897186]] # # [[ 0.6642673 ] # [ 0.6548476 ] # [ 0.00760126]]", "embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded =", "0.00760126]] # # [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883 ]]", "tf.nn.embedding_lookup(embedding, X_train) # So that we can apply a convolution 2d operations on", "apply a convolution 2d operations on top the expanded single channel embedded vectors", "tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly distributing the weights. embedding =", "the embedding vector by randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1,", "should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train", "embed = tf.nn.embedding_lookup(embedding, X_train) # So that we can apply a convolution 2d", "# [ 0.00760126]] # # [[-0.7074845 ] # [ 0.5100081 ] # [", "as np # training set. Contains a row of size 5 per train", "dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] #", "[ 0.00760126]] # # [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883", "# [ 0.5100081 ] # [ 0.7232883 ]] # # [[ 0.19342017] #", "[[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]] # # # [[[-0.45975637] #", "words replaced # by its equivalent unique index. 
The below dataset contains 6", "for testing purpose y_train = np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): #", "# [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087", "[ 0.57897186]] # # [[ 0.6642673 ] # [ 0.6548476 ] # [", "= tf.nn.embedding_lookup(embedding, X_train) # So that we can apply a convolution 2d operations", "[ 0.75451994] # [ 0.8864901 ]] # # [[-0.7074845 ] # [ 0.5100081", "training set. Contains a row of size 5 per train example. The row", "as a sentence, with words replaced # by its equivalent unique index. The", "# [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638", "0.19342017] # [-0.46509933] # [ 0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638", "# [ 0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638 ] # [", "vector for # 4 and 5 indexed words should be same. X_train =", "# output dummy for testing purpose y_train = np.array([0,1]) # Create the embeddings", "# [ 0.57897186]] # # [[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]]", "] # [ 0.57897186]] # # [[ 0.6642673 ] # [ 0.6548476 ]", "0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has a dimension", "a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]]", "train example. The row is same as a sentence, with words replaced #", "# [ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has a dimension of", "# [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087", "# [[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]] # # [[-0.7074845 ]", "output dummy for testing purpose y_train = np.array([0,1]) # Create the embeddings with", "result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result # [[[ 0.89598155", "0.75451994] # [ 0.8864901 ]] # # [[-0.7074845 ] # [ 0.5100081 ]", "Initiliaze the embedding vector by randomly distributing the weights. 
embedding = tf.Variable(tf.random_uniform((6, 3),", "import tensorflow as tf import numpy as np # training set. Contains a", "0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] #", "numbered 0-5. Ideally the word vector for # 4 and 5 indexed words", "and 5 indexed words should be same. X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy", "# training set. Contains a row of size 5 per train example. The", "numpy as np # training set. Contains a row of size 5 per", "with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT", "# [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]] # # # [[[-0.45975637]", "So that we can apply a convolution 2d operations on top the expanded", "# # [[ 0.6642673 ] # [ 0.6548476 ] # [ 0.00760126]] #", "index. The below dataset contains 6 unique words numbered 0-5. Ideally the word", "# [ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] # [ 0.8361807", "-0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] #", "[ 0.7497003 0.6044979 -0.5612638 ]]] # result_expanded - has a dimension of (2,5,3,1)", "1)) # create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that", "set. Contains a row of size 5 per train example. 
The row is", "# # [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385", "of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # #", "= np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector", "channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded", "print(result) print(result_expanded) # OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593] # [", "print(result_expanded) # OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225", "0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225", "[ 0.7985747 ] # [ 0.57897186]] # # [[ 0.6642673 ] # [", "# [ 0.6548476 ] # [ 0.00760126]] # # [[-0.7074845 ] # [", "[ 0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593] # [", "[ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638", "np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output dummy for testing purpose y_train = np.array([0,1]) # Create the", "distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding", "on top the expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with", "0.5100081 ] # [ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] #", "- has a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [", "embedding vector by randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1))", "# [ 0.7002065 ]] # # [[ 0.2708087 ] # [ 0.7985747 ]", "size 5 per train example. The row is same as a sentence, with", "4 and 5 indexed words should be same. 
X_train = np.array([[0,1,4,2,3],[0,1,5,2,3]]) # output", "the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly distributing the", "tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train)", "[[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955", "# [ 0.57897186]] # # [[ 0.6642673 ] # [ 0.6548476 ] #", "# [ 0.75451994] # [ 0.8864901 ]] # # [[-0.7074845 ] # [", "0.6044979 -0.5612638 ]]] # result_expanded - has a dimension of (2,5,3,1) # [[[[-0.45975637]", "# [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782]", "0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] #", "0-5. Ideally the word vector for # 4 and 5 indexed words should", "# OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792", "embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding layer embed =", "# [-0.46509933] # [ 0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638 ]", "tf import numpy as np # training set. Contains a row of size", "sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result # [[[", "[-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087 ] # [", "replaced # by its equivalent unique index. 
The below dataset contains 6 unique", "# Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly", "]] # # [[ 0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]]", "0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] # # [[ 0.6642673", "[ 0.8864901 ]] # # [[-0.7074845 ] # [ 0.5100081 ] # [", "0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] #", "5 per train example. The row is same as a sentence, with words", "# # [[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]] # # [[-0.7074845", "# # [[ 0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] #", "[ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]]]", "Create the embeddings with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly distributing", "[ 0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065", "0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]]", "[[ 0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] # # [[-0.90803576]", "create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that we can", "as tf import numpy as np # training set. Contains a row of", "# result_expanded - has a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ]", "can apply a convolution 2d operations on top the expanded single channel embedded", "randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the", "result # [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [", "import numpy as np # training set. Contains a row of size 5", "word vector for # 4 and 5 indexed words should be same. 
X_train", "-1, 1)) # create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So", "# create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that we", "has a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065", "-0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657]", "0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955", "operations on top the expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1)", "0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [", "embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape)", "[ 0.21602225 -0.44228792 -0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782]", "0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]] #", "] # [ 0.7985747 ] # [ 0.57897186]] # # [[-0.90803576] # [", "unique words numbered 0-5. 
Ideally the word vector for # 4 and 5", "-0.5612638 ]]] # result_expanded - has a dimension of (2,5,3,1) # [[[[-0.45975637] #", "# [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] # # [[", "0.7985747 ] # [ 0.57897186]] # # [[ 0.6642673 ] # [ 0.6548476", "tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded)", "] # [ 0.7002065 ]] # # [[ 0.2708087 ] # [ 0.7985747", "the word vector for # 4 and 5 indexed words should be same.", "tf.Session() as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT #", "sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result #", "] # [ 0.7985747 ] # [ 0.57897186]] # # [[ 0.6642673 ]", "]] # # [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] #", "[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087 ]", "sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593]", "the weights. embedding = tf.Variable(tf.random_uniform((6, 3), -1, 1)) # create the embedding layer", "[[ 0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] # # [[", "0.8864901 ]] # # [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883", "dummy for testing purpose y_train = np.array([0,1]) # Create the embeddings with tf.name_scope(\"embeddings\"):", "]] # # [[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883 ]]", "tensorflow as tf import numpy as np # training set. Contains a row", "# by its equivalent unique index. 
The below dataset contains 6 unique words", "single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess: sess.run(tf.global_variables_initializer());", "# [[ 0.2708087 ] # [ 0.7985747 ] # [ 0.57897186]] # #", "words numbered 0-5. Ideally the word vector for # 4 and 5 indexed", "[-0.46509933] # [ 0.8361807 ]]] # # # [[[-0.45975637] # [-0.5756638 ] #", "# So that we can apply a convolution 2d operations on top the", "print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593] #", "-0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]] #", "the expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as", "0.6642673 ] # [ 0.6548476 ] # [ 0.00760126]] # # [[-0.7074845 ]", "[[-0.90803576] # [ 0.75451994] # [ 0.8864901 ]] # # [[-0.7074845 ] #", "3), -1, 1)) # create the embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) #", "[ 0.7985747 ] # [ 0.57897186]] # # [[-0.90803576] # [ 0.75451994] #", "# [ 0.7985747 ] # [ 0.57897186]] # # [[ 0.6642673 ] #", "-0.20533657] # [ 0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003", "# # [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]] # # #", "[[-0.7074845 ] # [ 0.5100081 ] # [ 0.7232883 ]] # # [[", "0.9624436 -0.99176955 0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]]", "expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session() as sess:", "] # [ 0.6548476 ] # [ 0.00760126]] # # [[-0.7074845 ] #", "result_expanded - has a dimension of (2,5,3,1) # [[[[-0.45975637] # [-0.5756638 ] #", "]]] # # # [[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] #", "6 unique words numbered 0-5. 
Ideally the word vector for # 4 and", "0.15964746] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] # #", "a sentence, with words replaced # by its equivalent unique index. The below", "with tf.name_scope(\"embeddings\"): # Initiliaze the embedding vector by randomly distributing the weights. embedding", "# Initiliaze the embedding vector by randomly distributing the weights. embedding = tf.Variable(tf.random_uniform((6,", "as sess: sess.run(tf.global_variables_initializer()); result,result_expanded = sess.run([embed,embedded_chars_expanded]); print(result_expanded.shape) print(result) print(result_expanded) # OUTPUT # result", "# result # [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] #", "by its equivalent unique index. The below dataset contains 6 unique words numbered", "embedding layer embed = tf.nn.embedding_lookup(embedding, X_train) # So that we can apply a", "a row of size 5 per train example. The row is same as", "example. The row is same as a sentence, with words replaced # by", "with words replaced # by its equivalent unique index. The below dataset contains", "its equivalent unique index. 
The below dataset contains 6 unique words numbered 0-5.", "OUTPUT # result # [[[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657]", "0.470721 0.00804782] # [ 0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496", "# [[ 0.89598155 0.4275496 0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596", "-0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721 0.00804782] # [ 0.7497003 0.6044979", "[ 0.6548476 ] # [ 0.00760126]] # # [[-0.7074845 ] # [ 0.5100081", "[ 0.7232883 ]] # # [[ 0.19342017] # [-0.46509933] # [ 0.8361807 ]]]", "# [ 0.7497003 0.6044979 -0.5612638 ]] # # [[ 0.89598155 0.4275496 0.00858593] #", "0.00858593] # [ 0.21602225 -0.44228792 -0.20533657] # [-0.48809385 -0.55618596 -0.73995876] # [-0.29004955 0.470721", "sentence, with words replaced # by its equivalent unique index. The below dataset", "top the expanded single channel embedded vectors embedded_chars_expanded = tf.expand_dims(embed, -1) with tf.Session()", "unique index. The below dataset contains 6 unique words numbered 0-5. Ideally the", "[[[[-0.45975637] # [-0.5756638 ] # [ 0.7002065 ]] # # [[ 0.2708087 ]", "# [ 0.8864901 ]] # # [[-0.7074845 ] # [ 0.5100081 ] #", "same as a sentence, with words replaced # by its equivalent unique index.", "] # [ 0.5100081 ] # [ 0.7232883 ]] # # [[ 0.19342017]", "row of size 5 per train example. The row is same as a" ]
[ "around -1450, so our plot should be above that # smoothen the fig", "on Yahoo Finance import numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData =", "def prepare_data(s): ymax = 1000 s = s * ymax / s.max() #", "plot should be above that # smoothen the fig window_size = len(s) //", "instead of taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0,", "that # smoothen the fig window_size = len(s) // 150 s = s.rolling(window_size,", "len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image,", "is (0,0), so the horizon line is around -1450, so our plot should", "# scale y range s = 1450 -s # The image top left", "our plot should be above that # smoothen the fig window_size = len(s)", "tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax = 1000 s = s", "more control, instead of taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x", "-s # The image top left is (0,0), so the horizon line is", "1450 -s # The image top left is (0,0), so the horizon line", "get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker", "left is (0,0), so the horizon line is around -1450, so our plot", "start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax = 1000 s = s *", "= s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg", "s = s * ymax / s.max() # scale y range s =", "import numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker =", "scale y range s = 1450 -s # The image top left is", "as plt import yfinance as yf #To access the financial data available on", "start_date, end_date): tickerData = 
yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def", "prepare_data(s): ymax = 1000 s = s * ymax / s.max() # scale", "import yfinance as yf #To access the financial data available on Yahoo Finance", "dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices,", "x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices, horizon_height, color='#081A1C') plt.axis('off') plt.tight_layout() return fig", "numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d',", "s = 1450 -s # The image top left is (0,0), so the", "horizon_height): \"\"\"x_width_image: dedicated arg for more control, instead of taking image dim\"\"\" fig,", "access the financial data available on Yahoo Finance import numpy as np def", "def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return", "The image top left is (0,0), so the horizon line is around -1450,", "arg for more control, instead of taking image dim\"\"\" fig, ax = plt.subplots()", "line is around -1450, so our plot should be above that # smoothen", "ymax = 1000 s = s * ymax / s.max() # scale y", "min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more", "so the horizon line is around -1450, so our plot should be above", "#To access the financial data available on Yahoo Finance import numpy as np", "ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices, horizon_height, color='#081A1C') plt.axis('off') plt.tight_layout() return", "s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg 
for more control, instead", "top left is (0,0), so the horizon line is around -1450, so our", "available on Yahoo Finance import numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData", "s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated", "// 150 s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height):", "# smoothen the fig window_size = len(s) // 150 s = s.rolling(window_size, min_periods=1).mean()", "image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x,", "image top left is (0,0), so the horizon line is around -1450, so", "matplotlib.pyplot as plt import yfinance as yf #To access the financial data available", "def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more control, instead of", "s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for", "fig window_size = len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return s def", "financial data available on Yahoo Finance import numpy as np def get_stock_data(tickerSymbol, start_date,", "/ s.max() # scale y range s = 1450 -s # The image", "= tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax = 1000 s =", "img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more control, instead of taking image", "of taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image,", "s * ymax / s.max() # scale y range s = 1450 -s", "the fig window_size = len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return s", "= 1450 -s # The image top left is (0,0), so the 
horizon", "1000 s = s * ymax / s.max() # scale y range s", "be above that # smoothen the fig window_size = len(s) // 150 s", "smoothen the fig window_size = len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return", "above that # smoothen the fig window_size = len(s) // 150 s =", "= yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax =", "yf #To access the financial data available on Yahoo Finance import numpy as", "yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax = 1000", "is around -1450, so our plot should be above that # smoothen the", "= s * ymax / s.max() # scale y range s = 1450", "np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date)", "150 s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image:", "return s def make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more control,", "= 1000 s = s * ymax / s.max() # scale y range", "as yf #To access the financial data available on Yahoo Finance import numpy", "import matplotlib.pyplot as plt import yfinance as yf #To access the financial data", "make_picture(stock_prices, img, x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more control, instead of taking", "control, instead of taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x =", "plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices, horizon_height, color='#081A1C') plt.axis('off') plt.tight_layout()", "x_width_image, horizon_height): \"\"\"x_width_image: dedicated arg for more control, instead of taking image 
dim\"\"\"", "for more control, instead of taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img)", "Yahoo Finance import numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol)", "s.max() # scale y range s = 1450 -s # The image top", "yfinance as yf #To access the financial data available on Yahoo Finance import", "ymax / s.max() # scale y range s = 1450 -s # The", "end=end_date) return df_ticker def prepare_data(s): ymax = 1000 s = s * ymax", "the horizon line is around -1450, so our plot should be above that", "= len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices, img,", "Finance import numpy as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker", "-1450, so our plot should be above that # smoothen the fig window_size", "end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s):", "* ymax / s.max() # scale y range s = 1450 -s #", "df_ticker def prepare_data(s): ymax = 1000 s = s * ymax / s.max()", "plt import yfinance as yf #To access the financial data available on Yahoo", "should be above that # smoothen the fig window_size = len(s) // 150", "return df_ticker def prepare_data(s): ymax = 1000 s = s * ymax /", "# The image top left is (0,0), so the horizon line is around", "so our plot should be above that # smoothen the fig window_size =", "taking image dim\"\"\" fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices))", "horizon line is around -1450, so our plot should be above that #", "the financial data available on Yahoo Finance import numpy as np def get_stock_data(tickerSymbol,", "dedicated arg for more control, instead of taking image dim\"\"\" fig, ax =", "ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) 
ax.fill_between(x, stock_prices, horizon_height, color='#081A1C')", "= plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices, horizon_height, color='#081A1C') plt.axis('off')", "y range s = 1450 -s # The image top left is (0,0),", "df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax = 1000 s", "range s = 1450 -s # The image top left is (0,0), so", "(0,0), so the horizon line is around -1450, so our plot should be", "data available on Yahoo Finance import numpy as np def get_stock_data(tickerSymbol, start_date, end_date):", "\"\"\"x_width_image: dedicated arg for more control, instead of taking image dim\"\"\" fig, ax", "window_size = len(s) // 150 s = s.rolling(window_size, min_periods=1).mean() return s def make_picture(stock_prices,", "fig, ax = plt.subplots() ax.imshow(img) x = np.linspace(0, x_width_image, len(stock_prices)) ax.fill_between(x, stock_prices, horizon_height,", "tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date, end=end_date) return df_ticker def prepare_data(s): ymax", "as np def get_stock_data(tickerSymbol, start_date, end_date): tickerData = yf.Ticker(tickerSymbol) df_ticker = tickerData.history(period='1d', start=start_date," ]
[ "Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY =", "from enum import Enum class Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight'", "(Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity'", "USER = 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def", "= 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') return self.send(user,", "= 'position' USER = 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url =", "= {} user['userId'] = user_name user['name'] = user_name user['age'] = 0 user['mail'] =", "user_name user['name'] = user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD>", "+ to, content) def add_user (self, user_name): user = {} user['userId'] = user_name", "Enum class Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature'", "json from enum import Enum class Recivers (Enum): FLEX = 'flex' WEIGHT =", "add_user (self, user_name): user = {} user['userId'] = user_name user['name'] = user_name user['age']", "import requests import datetime import json from enum import Enum class Recivers (Enum):", "'humidity' NOISE = 'noise' LIGHT = 'light' POSITION = 'position' USER = 'user'", "= 'light' POSITION = 'position' USER = 'user' class RestSender: def __init__(self, url=", "= url def send(self, content, to): return requests.post(self.url + to, content) def add_user", "__init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content, to): return requests.post(self.url +", "to, content) def add_user (self, user_name): user = {} user['userId'] = user_name user['name']", "user = {} user['userId'] = user_name user['name'] = user_name user['age'] = 0 user['mail']", "HUMIDITY = 'humidity' NOISE = 'noise' LIGHT = 'light' 
POSITION = 'position' USER", "to): return requests.post(self.url + to, content) def add_user (self, user_name): user = {}", "import datetime import json from enum import Enum class Recivers (Enum): FLEX =", "user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')", "'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE = 'noise'", "requests import datetime import json from enum import Enum class Recivers (Enum): FLEX", "content) def add_user (self, user_name): user = {} user['userId'] = user_name user['name'] =", "datetime import json from enum import Enum class Recivers (Enum): FLEX = 'flex'", "'light' POSITION = 'position' USER = 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'):", "= 'noise' LIGHT = 'light' POSITION = 'position' USER = 'user' class RestSender:", "'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content,", "def send(self, content, to): return requests.post(self.url + to, content) def add_user (self, user_name):", "= user_name user['name'] = user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] =", "WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT", "<reponame>Alvarohf/University-work import requests import datetime import json from enum import Enum class Recivers", "'noise' LIGHT = 'light' POSITION = 'position' USER = 'user' class RestSender: def", "class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content, to):", "send(self, content, to): return requests.post(self.url + to, content) def add_user (self, user_name): user", "NOISE = 'noise' LIGHT = 'light' POSITION = 'position' USER = 'user' class", "FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY = 
'humidity' NOISE", "import json from enum import Enum class Recivers (Enum): FLEX = 'flex' WEIGHT", "def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content, to): return requests.post(self.url", "requests.post(self.url + to, content) def add_user (self, user_name): user = {} user['userId'] =", "{} user['userId'] = user_name user['name'] = user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com'", "user['name'] = user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created']", "RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content, to): return", "return requests.post(self.url + to, content) def add_user (self, user_name): user = {} user['userId']", "url= 'http://192.168.127.12:8080/'): self.url = url def send(self, content, to): return requests.post(self.url + to,", "user['userId'] = user_name user['name'] = user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw']", "LIGHT = 'light' POSITION = 'position' USER = 'user' class RestSender: def __init__(self,", "class Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY", "TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT = 'light' POSITION", "(self, user_name): user = {} user['userId'] = user_name user['name'] = user_name user['age'] =", "= user_name user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created'] =", "import Enum class Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE =", "enum import Enum class Recivers (Enum): FLEX = 'flex' WEIGHT = 'weight' TEMPERATURE", "url def send(self, content, to): return requests.post(self.url + to, content) def add_user (self,", "self.url = url def send(self, content, to): return requests.post(self.url + to, content) def", "user_name): user = {} user['userId'] = 
user_name user['name'] = user_name user['age'] = 0", "'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT = 'light' POSITION = 'position'", "0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') return self.send(user, Recivers.USER.value)", "= 'humidity' NOISE = 'noise' LIGHT = 'light' POSITION = 'position' USER =", "= 'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT =", "= 'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT = 'light' POSITION =", "= 'flex' WEIGHT = 'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE =", "'position' USER = 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url", "def add_user (self, user_name): user = {} user['userId'] = user_name user['name'] = user_name", "user['age'] = 0 user['mail'] = <EMAIL>_name+'@'+user_<EMAIL>+'.com' user['passw'] = <PASSWORD> user['created'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') return", "content, to): return requests.post(self.url + to, content) def add_user (self, user_name): user =", "= 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url = url def send(self,", "POSITION = 'position' USER = 'user' class RestSender: def __init__(self, url= 'http://192.168.127.12:8080/'): self.url", "'http://192.168.127.12:8080/'): self.url = url def send(self, content, to): return requests.post(self.url + to, content)", "'weight' TEMPERATURE = 'temperature' HUMIDITY = 'humidity' NOISE = 'noise' LIGHT = 'light'" ]
[ "#Changes pixel value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB format cv2.imshow('New Image',image)", "print(image[0:100,0:100]) #Changes pixel value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB format cv2.imshow('New", "cv2 image=cv2.imread(r'md.jpg',flags=1) print(image[0:100,0:100]) #Changes pixel value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB", "pixel value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB format cv2.imshow('New Image',image) cv2.waitKey(0)", "import cv2 image=cv2.imread(r'md.jpg',flags=1) print(image[0:100,0:100]) #Changes pixel value in the original images #image[0:100,0:100]=255#fully white", "image=cv2.imread(r'md.jpg',flags=1) print(image[0:100,0:100]) #Changes pixel value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB format", "value in the original images #image[0:100,0:100]=255#fully white image[0:100,0:100]=[165,42,42]#RGB format cv2.imshow('New Image',image) cv2.waitKey(0) cv2.destroyAllWindows()" ]
[ "# We could have also used: # return ord(ch) - ord('0') return char_set.index(ch)", "needle): # CAREFUL: Beware of these corner cases! if needle == \"\": return", "CAREFUL: Beware of these corner cases! if needle == \"\": return 0 if", "HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD if haystack_hash ==", "number as base, but its better to # take the alphabet size to", "- ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of these corner", "= ( (haystack_hash - base_power_up * old_char) * BASE + ch ) %", "- base_power_up * old_char) * BASE + ch ) % HASH_MOD if (", "on a rolling basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for i", "needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char)", "HASH_MOD = 1000000007 # We can use any number as base, but its", "could have also used: # return ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack,", "1000000007 # We can use any number as base, but its better to", "# return ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware", "char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of these corner cases! if needle", "return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of these corner cases! 
if", "- len(needle) haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch", "0 for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash =", "ch ) % HASH_MOD if ( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos]", "(haystack_hash - base_power_up * old_char) * BASE + ch ) % HASH_MOD if", "+ 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash = (", "+ haystack_char) % HASH_MOD if haystack_hash == needle_hash and needle == haystack[0 :", "return ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of", "get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up * old_char) * BASE + ch", "* old_char) * BASE + ch ) % HASH_MOD if ( haystack_hash ==", "26 needle_hash = 0 haystack_hash = 0 for i in range(needle): needle_char =", "+ 1 - len(needle) haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos -", "return haystack_start_pos return -1 def main(): haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack,", "# We can use any number as base, but its better to #", "== \"\": return 0 if len(needle) == 0 or len(needle) > len(haystack): return", "len(haystack): return -1 HASH_MOD = 1000000007 # We can use any number as", "string.digits): # We could have also used: # return ord(ch) - ord('0') return", "hashes on a rolling basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for", "or len(needle) > len(haystack): return -1 HASH_MOD = 1000000007 # We can use", "We can use any number as base, but its better to # take", "= 0 for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash", "HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos = i + 1 - len(needle)", "needle_hash and needle == 
haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main(): haystack", "take the alphabet size to minimize collisions BASE = 26 needle_hash = 0", "haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD haystack_hash", "): return haystack_start_pos return -1 def main(): haystack = \"abcs\" needle = \"\"", "( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1", "string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We", "its better to # take the alphabet size to minimize collisions BASE =", "any number as base, but its better to # take the alphabet size", "def rabin_karp(haystack, needle): # CAREFUL: Beware of these corner cases! if needle ==", "( (haystack_hash - base_power_up * old_char) * BASE + ch ) % HASH_MOD", "range(len(needle), len(haystack)): haystack_start_pos = i + 1 - len(needle) haystack_end_pos = i +", ": len(needle)]: return 0 # Now compute hashes on a rolling basis base_power_up", "if needle == \"\": return 0 if len(needle) == 0 or len(needle) >", "Now compute hashes on a rolling basis base_power_up = pow(BASE, (len(needle) - 1),", "old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash -", "= get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up", "import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have also", "needle_hash = 0 haystack_hash = 0 for i in range(needle): needle_char = get_char_code(needle[i])", "Verified on https://leetcode.com/problems/implement-strstr import string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters", "return 
-1 def main(): haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack, needle)) if", "1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos = i + 1 -", "haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD if haystack_hash == needle_hash", "haystack_char) % HASH_MOD if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]:", "len(needle)]: return 0 # Now compute hashes on a rolling basis base_power_up =", "a rolling basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for i in", "BASE = 26 needle_hash = 0 haystack_hash = 0 for i in range(needle):", "- 1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up * old_char)", "len(haystack)): haystack_start_pos = i + 1 - len(needle) haystack_end_pos = i + 1", "get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD", "haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main(): haystack = \"abcs\" needle =", "= 0 haystack_hash = 0 for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char", "i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash =", "-1 def main(): haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack, needle)) if __name__", "= pow(BASE, (len(needle) - 1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos =", "ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of these", "haystack_start_pos = i + 1 - len(needle) haystack_end_pos = i + 1 old_char", "size to minimize collisions BASE = 26 needle_hash = 0 haystack_hash = 0", "for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE", "* haystack_hash + haystack_char) % HASH_MOD if haystack_hash == needle_hash and needle ==", "also used: # return 
ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): #", "have also used: # return ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle):", "haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def", "= get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD haystack_hash =", "+ string.digits): # We could have also used: # return ord(ch) - ord('0')", "needle == \"\": return 0 if len(needle) == 0 or len(needle) > len(haystack):", "these corner cases! if needle == \"\": return 0 if len(needle) == 0", "= 26 needle_hash = 0 haystack_hash = 0 for i in range(needle): needle_char", "base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos", "% HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD if haystack_hash", "* BASE + ch ) % HASH_MOD if ( haystack_hash == needle_hash and", "get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up *", "from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could", "rolling basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for i in range(len(needle),", "if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]: return 0 #", "0 if len(needle) == 0 or len(needle) > len(haystack): return -1 HASH_MOD =", "i in range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE *", "if ( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return", "ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL: Beware of these corner cases!", "needle_hash and needle == haystack[0 : len(needle)]: return 0 # 
Now compute hashes", "to # take the alphabet size to minimize collisions BASE = 26 needle_hash", "= get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up * old_char) * BASE +", "> len(haystack): return -1 HASH_MOD = 1000000007 # We can use any number", "haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack, needle)) if __name__ == \"__main__\": main()", "can use any number as base, but its better to # take the", "cases! if needle == \"\": return 0 if len(needle) == 0 or len(needle)", "Beware of these corner cases! if needle == \"\": return 0 if len(needle)", "range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash +", "= (BASE * needle_hash + needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash", "if len(needle) == 0 or len(needle) > len(haystack): return -1 HASH_MOD = 1000000007", "compute hashes on a rolling basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD)", "collisions BASE = 26 needle_hash = 0 haystack_hash = 0 for i in", "i + 1 - len(needle) haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos", "# Now compute hashes on a rolling basis base_power_up = pow(BASE, (len(needle) -", "1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up * old_char) *", "# Verified on https://leetcode.com/problems/implement-strstr import string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch,", "len(needle) > len(haystack): return -1 HASH_MOD = 1000000007 # We can use any", "== haystack[0 : len(needle)]: return 0 # Now compute hashes on a rolling", "get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have also used: # return ord(ch)", "alphabet size to minimize collisions BASE = 26 needle_hash = 0 haystack_hash =", "haystack_hash == needle_hash and needle == haystack[0 : len(needle)]: return 0 # Now", "haystack_start_pos return -1 def main(): haystack 
= \"abcs\" needle = \"\" print(rabin_karp(haystack, needle))", "len(needle) haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch =", "needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD if", ") % HASH_MOD if ( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ):", "return 0 if len(needle) == 0 or len(needle) > len(haystack): return -1 HASH_MOD", "https://leetcode.com/problems/implement-strstr import string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits):", "and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main(): haystack =", "+ needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char) % HASH_MOD", "- 1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos = i + 1", "better to # take the alphabet size to minimize collisions BASE = 26", "We could have also used: # return ord(ch) - ord('0') return char_set.index(ch) def", "minimize collisions BASE = 26 needle_hash = 0 haystack_hash = 0 for i", "(BASE * haystack_hash + haystack_char) % HASH_MOD if haystack_hash == needle_hash and needle", "but its better to # take the alphabet size to minimize collisions BASE", "i in range(len(needle), len(haystack)): haystack_start_pos = i + 1 - len(needle) haystack_end_pos =", "haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i])", "= i + 1 - len(needle) haystack_end_pos = i + 1 old_char =", "= 1000000007 # We can use any number as base, but its better", "main(): haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack, needle)) if __name__ == \"__main__\":", "== needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main():", "in range(needle): needle_char = 
get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash", "ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash - base_power_up * old_char) * BASE", "== haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main(): haystack = \"abcs\" needle", "-1 HASH_MOD = 1000000007 # We can use any number as base, but", "* needle_hash + needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char)", "\"\": return 0 if len(needle) == 0 or len(needle) > len(haystack): return -1", "return -1 HASH_MOD = 1000000007 # We can use any number as base,", "functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have", "for i in range(len(needle), len(haystack)): haystack_start_pos = i + 1 - len(needle) haystack_end_pos", "(BASE * needle_hash + needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash +", "# take the alphabet size to minimize collisions BASE = 26 needle_hash =", "needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos return -1 def main(): haystack = \"abcs\"", "1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash = ( (haystack_hash", "= get_char_code(needle[i]) haystack_char = get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char) %", "basis base_power_up = pow(BASE, (len(needle) - 1), HASH_MOD) for i in range(len(needle), len(haystack)):", "= i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1]) ch = get_char_code(haystack[i]) haystack_hash", "used: # return ord(ch) - ord('0') return char_set.index(ch) def rabin_karp(haystack, needle): # CAREFUL:", "base, but its better to # take the alphabet size to minimize collisions", "base_power_up * old_char) * BASE + ch ) % HASH_MOD if ( haystack_hash", "use any number as base, but its better to # take the alphabet", "needle_hash 
+ needle_char) % HASH_MOD haystack_hash = (BASE * haystack_hash + haystack_char) %", "0 haystack_hash = 0 for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char =", "needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD haystack_hash = (BASE *", "haystack_hash = 0 for i in range(needle): needle_char = get_char_code(needle[i]) haystack_char = get_char_code(haystack[i])", "len(needle) == 0 or len(needle) > len(haystack): return -1 HASH_MOD = 1000000007 #", "% HASH_MOD if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]: return", "char_set=string.ascii_letters + string.digits): # We could have also used: # return ord(ch) -", "haystack[0 : len(needle)]: return 0 # Now compute hashes on a rolling basis", "HASH_MOD if haystack_hash == needle_hash and needle == haystack[0 : len(needle)]: return 0", "0 # Now compute hashes on a rolling basis base_power_up = pow(BASE, (len(needle)", "in range(len(needle), len(haystack)): haystack_start_pos = i + 1 - len(needle) haystack_end_pos = i", "= (BASE * haystack_hash + haystack_char) % HASH_MOD if haystack_hash == needle_hash and", "0 or len(needle) > len(haystack): return -1 HASH_MOD = 1000000007 # We can", "needle == haystack[0 : len(needle)]: return 0 # Now compute hashes on a", "BASE + ch ) % HASH_MOD if ( haystack_hash == needle_hash and needle", "the alphabet size to minimize collisions BASE = 26 needle_hash = 0 haystack_hash", "of these corner cases! 
if needle == \"\": return 0 if len(needle) ==", "and needle == haystack[0 : len(needle)]: return 0 # Now compute hashes on", "on https://leetcode.com/problems/implement-strstr import string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters +", "% HASH_MOD if ( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return", "def main(): haystack = \"abcs\" needle = \"\" print(rabin_karp(haystack, needle)) if __name__ ==", "return 0 # Now compute hashes on a rolling basis base_power_up = pow(BASE,", "== 0 or len(needle) > len(haystack): return -1 HASH_MOD = 1000000007 # We", "(len(needle) - 1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos = i +", "+ ch ) % HASH_MOD if ( haystack_hash == needle_hash and needle ==", "HASH_MOD if ( haystack_hash == needle_hash and needle == haystack[haystack_start_pos:haystack_end_pos] ): return haystack_start_pos", "rabin_karp(haystack, needle): # CAREFUL: Beware of these corner cases! if needle == \"\":", "== needle_hash and needle == haystack[0 : len(needle)]: return 0 # Now compute", "@lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have also used: #", "haystack_hash = ( (haystack_hash - base_power_up * old_char) * BASE + ch )", "corner cases! 
if needle == \"\": return 0 if len(needle) == 0 or", "1 - len(needle) haystack_end_pos = i + 1 old_char = get_char_code(haystack[haystack_start_pos - 1])", "import string from functools import lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): #", "to minimize collisions BASE = 26 needle_hash = 0 haystack_hash = 0 for", "pow(BASE, (len(needle) - 1), HASH_MOD) for i in range(len(needle), len(haystack)): haystack_start_pos = i", "def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have also used: # return", "as base, but its better to # take the alphabet size to minimize", "get_char_code(haystack[i]) needle_hash = (BASE * needle_hash + needle_char) % HASH_MOD haystack_hash = (BASE", "haystack_hash + haystack_char) % HASH_MOD if haystack_hash == needle_hash and needle == haystack[0", "old_char) * BASE + ch ) % HASH_MOD if ( haystack_hash == needle_hash", "# CAREFUL: Beware of these corner cases! if needle == \"\": return 0", "lru_cache @lru_cache(maxsize=None) def get_char_code(ch, char_set=string.ascii_letters + string.digits): # We could have also used:" ]
[ "not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not", "soup.find(text='Hair') is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars']", "if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None:", "is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] =", "of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None: item['pob'] = soup.find(text='Place of", "not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text", "= soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height')", "soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia']", "is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None:", "for table in tables: links = table.find_all(text='READ MORE') for link in links: yield", "in tables: links = table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s' %", "= soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not None: item['address'] = soup.find(text='Last Known", "= ['https://www.ice.gov/most-wanted'] header = 
'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8')", "item['dob'] = '' item['pob'] = '' item['complexion'] = '' item['reward'] = '' item['height']", "'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header +", "= '' item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name') is not None:", "Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i in values: if \"Summary:\" in", "is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] =", "['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'),", "soup.select('.field-item') tables.pop(0) for table in tables: links = table.find_all(text='READ MORE') for link in", "'' item['offense'] = '' item['aka'] = '' item['sex'] = '' item['dob'] = ''", "'' item['scars'] = '' item['address'] = '' item['synopsis'] = '' item['warning'] = ''", "soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not", "'' item['reward'] = '' item['height'] = '' item['weight'] = '' item['eyes'] = ''", "= BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka']", "if soup.find(text='Hair') is not None: 
item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None:", "soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i in values: if", "item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias')", "None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if", "item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i in", "Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text", "item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None: item['dob'] = soup.find(text='Date", "from black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider):", "'\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for table in tables:", "item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender')", "if soup.find(text='Date of Birth') is not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if", "None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i", "= 
soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is", "= soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is", "item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: item['complexion'] =", "Known Location') is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]')", "not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for", "= 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header", "of Birth') is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone')", "tables.pop(0) for table in tables: links = table.find_all(text='READ MORE') for link in links:", "Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header + '\\n')", "Spider, Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls", "response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body,", "'a', encoding='utf-8') as f: f.write(self.header + 
'\\n') soup = BeautifulSoup(response.body, 'lxml') tables =", "soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for table in tables: links", "import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov']", "BLICEFugitivesListItem from scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList'", "yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item", "soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not", "+ '\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for table in", "= soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None: item['dob'] = soup.find(text='Date of", "soup.find(text='Date of Birth') is not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place", "in values: if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text:", "soup.select('div[class=\"field-label\"]') if values: for i in values: if \"Summary:\" in i.text: item['synopsis'] =", "callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] =", "soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not", "item['sex'] = '' item['dob'] = '' item['pob'] = '' item['complexion'] = '' item['reward']", "in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), 
callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body,", "= '' item['address'] = '' item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name')", "Birth') is not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth')", "'' item['pob'] = '' item['complexion'] = '' item['reward'] = '' item['height'] = ''", "item['complexion'] = '' item['reward'] = '' item['height'] = '' item['weight'] = '' item['eyes']", "'' item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name') is not None: item['name']", "Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not", "None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if", "item['scars'] = '' item['address'] = '' item['synopsis'] = '' item['warning'] = '' if", "None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text", "soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not", "= soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known", "= soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i in values:", "if soup.select('div.wanted-for') is not None: 
item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None:", "'lxml') item = BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka'] = ''", "not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None:", "soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text", "'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self,", "is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] =", "None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None:", "soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text", "import BLICEFugitivesListItem from scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name =", "item['haia'] = '' item['scars'] = '' item['address'] = '' item['synopsis'] = '' item['warning']", "links = table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info)", "soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not", "of Birth') is not None: item['dob'] = soup.find(text='Date of 
Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of", "= soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin", "encoding='utf-8') as f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item')", "response): soup = BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] = '' item['offense'] =", "= soup.select('.field-item') tables.pop(0) for table in tables: links = table.find_all(text='READ MORE') for link", "'' item['height'] = '' item['weight'] = '' item['eyes'] = '' item['haia'] = ''", "i in values: if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in", "is not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] =", "if soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is", "if soup.find(text='Place of Birth') is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if", "if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None:", "item['reward'] = '' item['height'] = '' item['weight'] = '' item['eyes'] = '' item['haia']", "soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text", "in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text: item['warning'] = i.next_sibling.text yield", "not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] = 
soup.find(text='Hair').parent.next_sibling.next_sibling.text", "Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values: for i in values: if \"Summary:\"", "= soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is", "soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height']", "['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as", "import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted']", "MORE') for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self, response):", "'' if soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not", "not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text", "not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text", "start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with 
open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a',", "if soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None:", "links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml')", "= soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is", "soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes']", "def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header + '\\n') soup", "soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None: item['pob'] = soup.find(text='Place", "= '' item['dob'] = '' item['pob'] = '' item['complexion'] = '' item['reward'] =", "not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None: item['dob']", "as f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0)", "= 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def", "tables: links = table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']),", "= '' item['offense'] = '' item['aka'] = '' item['sex'] = '' item['dob'] =", "'' item['warning'] = '' if soup.find(text='Name') is not None: item['name'] = 
soup.find(text='Name').parent.next_sibling.next_sibling.text if", "= '' if soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is", "is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] =", "table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self,", "item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name') is not None: item['name'] =", "is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] =", "= '' item['pob'] = '' item['complexion'] = '' item['reward'] = '' item['height'] =", "if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is", "item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not None: item['address'] = soup.find(text='Last", "BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka'] =", "not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text", "is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward']", "with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml')", "parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: 
f.write(self.header + '\\n') soup =", "if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None:", "'' item['dob'] = '' item['pob'] = '' item['complexion'] = '' item['reward'] = ''", "= soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is", "if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is", "= BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for table in tables: links =", "None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth') is not None: item['dob'] =", "Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward')", "Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None:", "values = soup.select('div[class=\"field-label\"]') if values: for i in values: if \"Summary:\" in i.text:", "= '' item['warning'] = '' if soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text", "is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] =", "= table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def", "is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if values:", 
"BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka'] = '' item['sex'] = ''", "item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if", "of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if", "table in tables: links = table.find_all(text='READ MORE') for link in links: yield Request(url='https://www.ice.gov%s'", "= soup.select('div[class=\"field-label\"]') if values: for i in values: if \"Summary:\" in i.text: item['synopsis']", "bs4 import BeautifulSoup from black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request import", "f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for", "soup.find(text='Place of Birth') is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin", "allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response):", "if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None:", "if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text: item['warning'] =", "open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f: f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml') tables", "% (link.parent['href']), callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item = 
BLICEFugitivesListItem()", "not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text", "os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header", "None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not None: item['address'] =", "for i in values: if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\"", "item['pob'] = '' item['complexion'] = '' item['reward'] = '' item['height'] = '' item['weight']", "item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair')", "if soup.find(text='Last Known Location') is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values", "item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes')", "item['height'] = '' item['weight'] = '' item['eyes'] = '' item['haia'] = '' item['scars']", "= '' item['aka'] = '' item['sex'] = '' item['dob'] = '' item['pob'] =", "soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight']", "f.write(self.header + '\\n') soup = BeautifulSoup(response.body, 'lxml') tables = soup.select('.field-item') tables.pop(0) for table", "None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not None: 
item['complexion']", "if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None:", "'' item['eyes'] = '' item['haia'] = '' item['scars'] = '' item['address'] = ''", "\"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text: item['warning'] = i.next_sibling.text", "class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header =", "name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning'", "link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self, response): soup =", "header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with open(os.path.abspath('results/BL_ICEFugitivesList.txt'), 'a', encoding='utf-8') as f:", "soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex']", "'' item['weight'] = '' item['eyes'] = '' item['haia'] = '' item['scars'] = ''", "soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is", "'' item['address'] = '' item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name') is", "from scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains", "if values: for i in values: if \"Summary:\" in i.text: item['synopsis'] = 
i.next_sibling.text", "soup.find(text='Skin Tone') is not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not", "tables = soup.select('.field-item') tables.pop(0) for table in tables: links = table.find_all(text='READ MORE') for", "not None: item['complexion'] = soup.find(text='Skin Tone').parent.next_sibling.next_sibling.text if soup.find(text='Reward') is not None: item['reward'] =", "scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains =", "soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not", "item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last", "BeautifulSoup from black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request import os class", "import BeautifulSoup from black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request import os", "is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] =", "not None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text", "None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if", "is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') 
is not None:", "item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date", "'' item['haia'] = '' item['scars'] = '' item['address'] = '' item['synopsis'] = ''", "soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of Birth')", "black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request import os class BlIcefugitiveslistSpider(Spider): name", "Location') is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values = soup.select('div[class=\"field-label\"]') if", "is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is not", "soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense']", "item['aka'] = '' item['sex'] = '' item['dob'] = '' item['pob'] = '' item['complexion']", "= soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not None: item['sex'] = soup.find(text='Gender').parent.next_sibling.next_sibling.text if soup.find(text='Date of", "item['address'] = '' item['synopsis'] = '' item['warning'] = '' if soup.find(text='Name') is not", "soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka']", "soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks') is not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location')", "BeautifulSoup(response.body, 'lxml') 
tables = soup.select('.field-item') tables.pop(0) for table in tables: links = table.find_all(text='READ", "for link in links: yield Request(url='https://www.ice.gov%s' % (link.parent['href']), callback=self.get_info) def get_info(self, response): soup", "(link.parent['href']), callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name']", "Request import os class BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls =", "= '' item['weight'] = '' item['eyes'] = '' item['haia'] = '' item['scars'] =", "item['warning'] = '' if soup.find(text='Name') is not None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for')", "= '' item['complexion'] = '' item['reward'] = '' item['height'] = '' item['weight'] =", "None: item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if", "item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is not None: item['haia'] = soup.find(text='Hair').parent.next_sibling.next_sibling.text if soup.find(text='Scars/Marks')", "item['eyes'] = '' item['haia'] = '' item['scars'] = '' item['address'] = '' item['synopsis']", "item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None: item['pob']", "= '' item['height'] = '' item['weight'] = '' item['eyes'] = '' item['haia'] =", "None: item['name'] = soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text if", "= '' item['scars'] = '' item['address'] = '' item['synopsis'] = '' item['warning'] =", "soup = BeautifulSoup(response.body, 'lxml') item = 
BLICEFugitivesListItem() item['name'] = '' item['offense'] = ''", "item['name'] = '' item['offense'] = '' item['aka'] = '' item['sex'] = '' item['dob']", "= '' item['reward'] = '' item['height'] = '' item['weight'] = '' item['eyes'] =", "soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if soup.find(text='Gender') is not", "None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight') is not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if", "item = BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka'] = '' item['sex']", "= soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if soup.find(text='Hair') is", "is not None: item['dob'] = soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is", "not None: item['scars'] = soup.find(text='Scars/Marks').parent.next_sibling.next_sibling.text if soup.find(text='Last Known Location') is not None: item['address']", "from bs4 import BeautifulSoup from black_list.items import BLICEFugitivesListItem from scrapy import Spider, Request", "values: if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text: item['warning']", "item['reward'] = soup.find(text='Reward').parent.next_sibling.next_sibling.text if soup.find(text='Height') is not None: item['height'] = soup.find(text='Height').parent.next_sibling.next_sibling.text if soup.find(text='Weight')", "i.text: item['synopsis'] = i.next_sibling.text if \"Warning:\" in i.text: item['warning'] = i.next_sibling.text yield item", "'' item['sex'] = '' item['dob'] = '' item['pob'] = '' item['complexion'] = ''", "Request(url='https://www.ice.gov%s' % 
(link.parent['href']), callback=self.get_info) def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item =", "'lxml') tables = soup.select('.field-item') tables.pop(0) for table in tables: links = table.find_all(text='READ MORE')", "not None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text", "values: for i in values: if \"Summary:\" in i.text: item['synopsis'] = i.next_sibling.text if", "Birth') is not None: item['pob'] = soup.find(text='Place of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Skin Tone') is", "'' item['complexion'] = '' item['reward'] = '' item['height'] = '' item['weight'] = ''", "= '' item['haia'] = '' item['scars'] = '' item['address'] = '' item['synopsis'] =", "None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not None: item['aka'] = soup.find(text='Alias').parent.next_sibling.next_sibling.text if", "= '' item['sex'] = '' item['dob'] = '' item['pob'] = '' item['complexion'] =", "item['weight'] = '' item['eyes'] = '' item['haia'] = '' item['scars'] = '' item['address']", "= ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case Number|Scars|Address|Synopsis|Warning' def parse(self, response): with", "get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] = '' item['offense']", "soup.find(text='Last Known Location') is not None: item['address'] = soup.find(text='Last Known Location').parent.next_sibling.next_sibling.text values =", "= '' item['eyes'] = '' item['haia'] = '' item['scars'] = '' item['address'] =", "def get_info(self, response): soup = BeautifulSoup(response.body, 'lxml') item = BLICEFugitivesListItem() item['name'] = ''", 
"soup.find(text='Name').parent.next_sibling.next_sibling.text if soup.select('div.wanted-for') is not None: item['offense'] = soup.select('div.wanted-for')[0].text if soup.find(text='Alias') is not", "= BLICEFugitivesListItem() item['name'] = '' item['offense'] = '' item['aka'] = '' item['sex'] =", "None: item['weight'] = soup.find(text='Weight').parent.next_sibling.next_sibling.text if soup.find(text='Eyes') is not None: item['eyes'] = soup.find(text='Eyes').parent.next_sibling.next_sibling.text if", "BlIcefugitiveslistSpider(Spider): name = 'BL_ICEFugitivesList' allowed_domains = ['www.ice.gov'] start_urls = ['https://www.ice.gov/most-wanted'] header = 'Name|Status|Offense|AKA|Sex|DOB|POB|Complexion|Reward|Height|Weight|Eyes|Haia|Case", "item['offense'] = '' item['aka'] = '' item['sex'] = '' item['dob'] = '' item['pob']", "= soup.find(text='Date of Birth').parent.next_sibling.next_sibling.text if soup.find(text='Place of Birth') is not None: item['pob'] =", "'' item['aka'] = '' item['sex'] = '' item['dob'] = '' item['pob'] = ''" ]
[ "= ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught: id: {},", "= eid self.message = message class ExceptionDemo: def draw(self, number): print('called compute(%s)' %", "500 or number <= 0: raise UserDefinedException(101, 'number out of bound') else: print('normal", "class UserDefinedException(Exception): def __init__(self, eid, message): self.eid = eid self.message = message class", "str(number)) if number > 500 or number <= 0: raise UserDefinedException(101, 'number out", "print('normal exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception", "bound') else: print('normal exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as", "% str(number)) if number > 500 or number <= 0: raise UserDefinedException(101, 'number", "self.message = message class ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number)) if", "class ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number)) if number > 500", "0: raise UserDefinedException(101, 'number out of bound') else: print('normal exit') demo = ExceptionDemo()", "demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught: id: {}, message: {}'.format(e.eid, e.message))", "number <= 0: raise UserDefinedException(101, 'number out of bound') else: print('normal exit') demo", "exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught:", "demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught: id:", "__init__(self, eid, message): self.eid = eid self.message = message class ExceptionDemo: def draw(self,", "eid self.message = message class ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number))", "'number out of bound') else: print('normal exit') demo = 
ExceptionDemo() try: demo.draw(125) demo.draw(900)", "message): self.eid = eid self.message = message class ExceptionDemo: def draw(self, number): print('called", "out of bound') else: print('normal exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except", "= message class ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number)) if number", "ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught: id: {}, message:", "number): print('called compute(%s)' % str(number)) if number > 500 or number <= 0:", "print('called compute(%s)' % str(number)) if number > 500 or number <= 0: raise", "<= 0: raise UserDefinedException(101, 'number out of bound') else: print('normal exit') demo =", "self.eid = eid self.message = message class ExceptionDemo: def draw(self, number): print('called compute(%s)'", "or number <= 0: raise UserDefinedException(101, 'number out of bound') else: print('normal exit')", "UserDefinedException(Exception): def __init__(self, eid, message): self.eid = eid self.message = message class ExceptionDemo:", "ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number)) if number > 500 or", "compute(%s)' % str(number)) if number > 500 or number <= 0: raise UserDefinedException(101,", "UserDefinedException(101, 'number out of bound') else: print('normal exit') demo = ExceptionDemo() try: demo.draw(125)", "try: demo.draw(125) demo.draw(900) except UserDefinedException as e: print('Exception caught: id: {}, message: {}'.format(e.eid,", "message class ExceptionDemo: def draw(self, number): print('called compute(%s)' % str(number)) if number >", "eid, message): self.eid = eid self.message = message class ExceptionDemo: def draw(self, number):", "def __init__(self, eid, message): self.eid = eid self.message = message class ExceptionDemo: def", "of bound') else: print('normal exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except 
UserDefinedException", "def draw(self, number): print('called compute(%s)' % str(number)) if number > 500 or number", "else: print('normal exit') demo = ExceptionDemo() try: demo.draw(125) demo.draw(900) except UserDefinedException as e:", "> 500 or number <= 0: raise UserDefinedException(101, 'number out of bound') else:", "raise UserDefinedException(101, 'number out of bound') else: print('normal exit') demo = ExceptionDemo() try:", "number > 500 or number <= 0: raise UserDefinedException(101, 'number out of bound')", "if number > 500 or number <= 0: raise UserDefinedException(101, 'number out of", "draw(self, number): print('called compute(%s)' % str(number)) if number > 500 or number <=" ]
[ "range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result =", "node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node):", "def visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' == ')", "') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse)", "self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func)", "self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % '", "= ( '(%s' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) )", "return res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__:", "self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s", "in kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False): if not only_globals: res", "getattr(cls, 'instance', None) if not res: res = super().__new__(cls) cls.instance = res return", "self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left", "node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node):", "Universe: '''The class of the `this`:obj: object. 
The `this` object is simply a", "None if lower: res = '%s:' % lower else: res = ':' if", "in range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop())) for if_ in range(pop()):", "( '(%s' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if", "self.stack.pop(-1) if node.kwargs else '' call = ', '.join(args) if keywords: if call:", "import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject):", "= next from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return new_class class", "is not ') def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append('", "def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-')", "`query object`:term:, other types are a TypeError. ''' from types import GeneratorType if", "self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if", "visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not in ') def", "= self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else:", "reversed(node.elts): self.visit(elt) result = ( '(%s' % ', '.join( self.stack.pop(-1) for _ in", "import_object from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass =", "= self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else:", "__iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name:", "def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node): 
self.stack.append(' <= ') def", "__init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals =", "setattr(self, attr, val) def get_value(self, name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name,", "query expression. ''' from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled =", "kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False): if not only_globals: res =", "object from a predicate expression. ''' from xoutil.objects import import_object from .revenge import", "xoutil.decorator.meta import decorator from xotl.ql import interfaces class Universe: '''The class of the", "args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for", "'.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_)", "right = ''.join( # I assume each operator has spaces around it '%s%s'", "stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~') def", "import types from xoutil.symbols import Unset from xoutil.objects import memoized_property from collections import", "cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v):", "I'm picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ',", "for val in node.values: self.visit(val) exprs = [] for _ in range(len(node.values)): exprs.insert(0,", "self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1)", "visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' - ') def 
visit_Mult(self,", "def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+')", "a generator object) it is passed to `get_query_object`:func: along with all keyword arguments.", "+= '%s' % upper if step: res += ':%s' % step self.stack.append(res) def", "reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop", "attr, val) def get_value(self, name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset)", "lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' %", "'order', 'get_value', 'qst', '_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst,", "> ') def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is", "as a decorator:: @thesefy class People: pass query = (who for who in", "xotl.ql import interfaces class Universe: '''The class of the `this`:obj: object. The `this`", "= [] for _ in range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop()))", "step: res += ':%s' % step self.stack.append(res) def visit_List(self, node): for elt in", "above will be equivalent to:: query = (who for who in (x for", "{} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else:", "get(self, key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0':", "query. ''' def __new__(cls): res = getattr(cls, 'instance', None) if not res: res", "predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from a predicate expression. 
''' from", "if_ in range(pop()): # [*] pop the length of ifs lines.append('if %s' %", "def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object from a query expression.", "res: res = super().__new__(cls) cls.instance = res return res def __getitem__(self, key): return", "save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines", "= ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) )", "= None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None if", "stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op", "''' import ast import types from xoutil.symbols import Unset from xoutil.objects import memoized_property", "'**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s)", "in node.values: self.visit(val) exprs = [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op", "object to participate in queries. Example as a wrapper:: class People: # ...", "_FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping): def", "def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' / ') def", "raise StopIteration __next__ = next from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta)", "lines = [] for _ in range(len(node.generators)): lines.append('for %s in %s' % (pop(),", "All rights reserved. 
# # This is free software; you can do what", "else: res = ':' if upper: res += '%s' % upper if step:", "self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append('", "if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper)", "raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node):", "provided ' 'is not: %r' % type(which)) return which @decorator def thesefy(target, make_subquery=True):", "def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' | ') def", "any(name in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword argument') self.expression =", "%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def", "[*] pop the length of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def", "v) for k, v in keywords) if starargs: if call: call += ',", "generator object) it is passed to `get_query_object`:func: along with all keyword arguments. If", "is False, `thesefy` injects an ``__iter__()`` that simply returns the same object and", "`thesefy`:func: as a decorator-returning function:: class Person: pass query = (x for x", "# or simply as a decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target,", "upper = self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1)", "query object from a query expression. 
''' from xoutil.objects import import_object from xotl.ql.revenge", "'<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack =", "def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value)", "**kwargs): '''Ensure a query object. If `which` is a query expression (more precisely", "self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node):", "interfaces class Universe: '''The class of the `this`:obj: object. The `this` object is", "{ name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {}", "self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def", "%r at %r' % (stack, node) return stack.pop() def visit_And(self, node): self.stack.append(' and", "k, v in keywords) if starargs: if call: call += ', ' call", "'' kwargs = self.stack.pop(-1) if node.kwargs else '' call = ', '.join(args) if", "% (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self,", "node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines))", "def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop())", "visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def", "_ in range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt)", "FrameClass = 
import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs )", "stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' %", "**kwargs ) def normalize_query(which, **kwargs): '''Ensure a query object. If `which` is a", "') def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' % node.n)", "reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) )", "along with all keyword arguments. If `which` is not a query expression it", "def __iter__(self): return (x for x in this if isinstance(x, self)) else: def", "raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for attr, val in kwargs.items():", "= next this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value',", "else '' kwargs = self.stack.pop(-1) if node.kwargs else '' call = ', '.join(args)", "') def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^ ')", "return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack = [] self.visit(node)", "return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object from a", "% ') def visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' <<", "predicate object from a predicate expression. 
''' from xoutil.objects import import_object from .revenge", "for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func", "uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or", "def thesefy(target, make_subquery=True): '''Allow an object to participate in queries. Example as a", "self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append('", "isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but object provided ' 'is not: %r'", "StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups',", "self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id)", "self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def", "types from xoutil.symbols import Unset from xoutil.objects import memoized_property from collections import MappingView,", "if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower)", "reserved. 
# # This is free software; you can do what the LICENCE", "= Unset if res is Unset: res = self._frame.f_globals.get(name, Unset) if res is", "self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = [] for _ in range(len(node.generators)): lines.append('for", "If `target` already support the iterable protocol (i.e implement ``__iter__``), return it unchanged.", ".revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or", "= self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body, test, orelse)) def visit_Lambda(self,", "') def visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op)", "**kwargs): '''Get the query object from a query expression. ''' from xoutil.objects import", "with all keyword arguments. If `which` is not a query expression it must", "utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors # All", "def get(self, key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key ==", "for x in this if isinstance(x, self)) else: def __iter__(self): return self def", "key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else:", "= stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right))", "next this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst',", "kwargs = self.stack.pop(-1) if node.kwargs else '' call = ', '.join(args) if keywords:", "None) for attr, val in kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False):", "', '.join('%s=%s' % (k, v) for k, v in keywords) if starargs: if", ") def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result = ( 
'(%s'", "visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self,", "call: call += ', ' call += '**%s' % kwargs self.stack.append('%s(%s)' % (func,", "' 'is not: %r' % type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow", "who in (x for x in this if isinstance(x, People))) If `make_subquery` is", "node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join(", "% ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def visit_Tuple(self, node):", "range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1) if", "isinstance(x, People))) If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that simply returns", "def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join(", "injects an ``__iter__()`` that simply returns the same object and a ``next()`` method", "`target` already support the iterable protocol (i.e implement ``__iter__``), return it unchanged. 
If", "return self def __iter__(self): return self def next(self): raise StopIteration __next__ = next", "') def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is ')", "v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack = [] self.visit(node) assert", "left = stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op,", "_ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1))", "stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not", "in (x for x in this if isinstance(x, People))) If `make_subquery` is False,", "pass query = (x for x in thesefy(make_subquery=False)(Person)) # or simply as a", "in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join( # I assume each operator", "_ in range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop())) for if_ in", "NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice)", "in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1),", "`thesefy` injects an ``__iter__()`` that simply returns the same object and a ``next()``", "collections import MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql import interfaces class", "self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators): for if_", "self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append('", "node.values: 
self.visit(val) exprs = [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op =", "(func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self,", "else %s)' % (body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self,", "for elt in reversed(node.elts): self.visit(elt) result = ( '(%s' % ', '.join( self.stack.pop(-1)", "% (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node)", "self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self, node):", "# Copyright (c) <NAME> [~º/~] and Contributors # All rights reserved. # #", "'.join(args) if keywords: if call: call += ', ' call += ', '.join('%s=%s'", "in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression',", "self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node):", "lower = None if lower: res = '%s:' % lower else: res =", "import memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql", "self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except KeyError:", "node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node):", "Example as a decorator:: @thesefy class People: pass query = (who for who", "else: return True def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key", "If `make_subquery` is True, then the query shown above will be equivalent to::", "op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) 
self.visit(node.left) left = stack.pop(-1)", "return res def get(self, key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and", "key == '.0': return sub_query_or_value(res) else: return res def get(self, key, default=None): res", "expected, but object provided ' 'is not: %r' % type(which)) return which @decorator", "'%s:' % lower else: res = ':' if upper: res += '%s' %", "for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords))", "def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is not ')", "starargs if kwargs: if call: call += ', ' call += '**%s' %", "node.kwargs else '' call = ', '.join(args) if keywords: if call: call +=", "# [*] pop the length of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines))", "def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' - ') def", "None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None if lower:", "unchanged. If `make_subquery` is True, then the query shown above will be equivalent", "reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))]", "in a query. ''' def __new__(cls): res = getattr(cls, 'instance', None) if not", "iterable protocol (i.e implement ``__iter__``), return it unchanged. 
If `make_subquery` is True, then", "= stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node): self.stack.append(' + ')", "visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1)", "visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _", "range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop())) for if_ in range(pop()): #", "if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args):", "def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): # order does not really", "visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self,", "not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but object provided ' 'is not:", "KeyError: return False else: return True def __getitem__(self, key): res = self._mapping[key] if", "def visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' > ') def", "'''Get a predicate object from a predicate expression. ''' from xoutil.objects import import_object", "def visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step = None", "GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise", "(i.e implement ``__iter__``), return it unchanged. 
If `make_subquery` is True, then the query", "') def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is not", "except KeyError: return False else: return True def __getitem__(self, key): res = self._mapping[key]", "function:: class Person: pass query = (x for x in thesefy(make_subquery=False)(Person)) # or", "range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack", "res = self._frame.f_locals.get(name, Unset) else: res = Unset if res is Unset: res", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME>", "self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand =", "val) def get_value(self, name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset) else:", "lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators): for if_ in", "self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body, test, orelse)) def visit_Lambda(self, node):", "if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for", "... pass query = (who for who in thesefy(People)) Example as a decorator::", "if call: call += ', ' call += ', '.join('%s=%s' % (k, v)", "'(%s' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if len(node.elts)", "== '.0': return sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping) def _get_closure(obj):", "node): self.stack.append(' not in ') def visit_Compare(self, node): self.visit(node.left) for op, expr in", "from a query expression. 
''' from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled", "right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self,", "') def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >= ')", "isinstance(x, self)) else: def __iter__(self): return self def next(self): raise StopIteration __next__ =", "node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg", "_ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self,", "self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords", "is passed to `get_query_object`:func: along with all keyword arguments. If `which` is not", "generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals,", "def __getitem__(self, key): return self def __getattr__(self, name): return self def __iter__(self): return", "else: step = None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper =", "TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self,", "%s)' % (body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node):", "(self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self,", "import import_object from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass", "node): for elt in reversed(node.elts): self.visit(elt) 
result = ( '(%s' % ', '.join(", "self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner", "result = ( '(%s' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) )", "it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1),", "you call `thesefy`:func: as a decorator-returning function:: class Person: pass query = (x", "next(self): raise StopIteration __next__ = next from xoutil.objects import copy_class new_class = copy_class(target,", "def visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' << ') def", "object`:term:, other types are a TypeError. ''' from types import GeneratorType if isinstance(which,", "self[key] except KeyError: return False else: return True def __getitem__(self, key): res =", "op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self,", "= (who for who in thesefy(People)) Example as a decorator:: @thesefy class People:", "or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in", "self.stack.append('(%s if %s else %s)' % (body, test, orelse)) def visit_Lambda(self, node): raise", "if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None if lower: res", "of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = [] for", "QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the old API.", "import import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType", "self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' ** ') def 
visit_LShift(self, node): self.stack.append('", "node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op", "return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder()", "from xoutil.objects import import_object from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass =", "reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs", "op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node): self.stack.append(' +", "= self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op,", "= stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~')", "'''Get the query object from a query expression. ''' from xoutil.objects import import_object", "# # This is free software; you can do what the LICENCE file", "visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop =", "__getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res)", "`which` is not a query expression it must be a `query object`:term:, other", "= self.stack = [] self.visit(node) assert len(stack) == 1, 'Remaining items %r at", "arguments. If `which` is not a query expression it must be a `query", "participate in queries. Example as a wrapper:: class People: # ... 
pass query", "node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for", "= self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body, test,", "copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries =", "QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the", "visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self,", "True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class", "def visit_Dict(self, node): # order does not really matter but I'm picky for", "object) it is passed to `get_query_object`:func: along with all keyword arguments. If `which`", "pop = lambda: self.stack.pop(-1) lines = [] for _ in range(len(node.generators)): lines.append('for %s", "= SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object", "lambda: self.stack.pop(-1) lines = [] for _ in range(len(node.generators)): lines.append('for %s in %s'", "-*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors", "& ') def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' //", "= Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type)", "from xoutil.objects import memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta import decorator", "%s' % (pop(), pop())] 
self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node):", "for elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts)))", "% (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def", "Contributors # All rights reserved. # # This is free software; you can", "self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append('", "right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in", "it unchanged. If `make_subquery` is True, then the query shown above will be", "PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure", "== ') def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' <", "to participate in queries. Example as a wrapper:: class People: # ... 
pass", "def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >= ') def", "self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def", "Unset: res = self._frame.f_globals.get(name, Unset) if res is not Unset: return res else:", "'get_value', 'qst', '_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame,", "self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _", "'instance', None) if not res: res = super().__new__(cls) cls.instance = res return res", "``__iter__()`` that simply returns the same object and a ``next()`` method that immediately", "qst, _frame, **kwargs): self.qst = qst self._frame = _frame if any(name in RESERVED_ARGUMENTS", "in People) If `target` already support the iterable protocol (i.e implement ``__iter__``), return", "passed to `get_query_object`:func: along with all keyword arguments. 
If `which` is not a", "visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is not ') def", "self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append('", "if not res: res = super().__new__(cls) cls.instance = res return res def __getitem__(self,", "return which @decorator def thesefy(target, make_subquery=True): '''Allow an object to participate in queries.", "call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self, node):", "kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for attr, val in", "= [] self.visit(node) assert len(stack) == 1, 'Remaining items %r at %r' %", "or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs):", "_get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for name, cell", "def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node):", "If `which` is not a query expression it must be a `query object`:term:,", "node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand", "self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res", "node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda:", "thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False) class Person: pass ''' if", "self.stack.append(' < ') def visit_LtE(self, node): self.stack.append(' <= ') def 
visit_Gt(self, node): self.stack.append('", "the iterable protocol (i.e implement ``__iter__``), return it unchanged. If `make_subquery` is True,", "''' if getattr(target, '__iter__', None): return target class new_meta(type(target)): if make_subquery: def __iter__(self):", "gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the old API. these = get_query_object", "LICENCE file allows you to. # '''The query language core. ''' import ast", "do what the LICENCE file allows you to. # '''The query language core.", "return { name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return", "ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = [] for _", "types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def", "node): self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node):", "kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self, attr, val) def get_value(self, name,", "def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test)", "elt in reversed(node.elts): self.visit(elt) result = ( '(%s' % ', '.join( self.stack.pop(-1) for", "self.expression = kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self, attr, val) def", "import interfaces class Universe: '''The class of the `this`:obj: object. 
The `this` object", "else: def __iter__(self): return self def next(self): raise StopIteration __next__ = next from", "or simply as a decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__',", "def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val)", "length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = []", ") # Alias to the old API. these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject',", "is ') def visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self, node): self.stack.append('", "= getattr(cls, 'instance', None) if not res: res = super().__new__(cls) cls.instance = res", "can be drawn in a query. ''' def __new__(cls): res = getattr(cls, 'instance',", "') def visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self, node): self.visit(node.left) for", "% lower else: res = ':' if upper: res += '%s' % upper", "at %r' % (stack, node) return stack.pop() def visit_And(self, node): self.stack.append(' and ')", "allows you to. # '''The query language core. ''' import ast import types", "and a ``next()`` method that immediately stops the iteration. Notice that in order", "def _visit_generators(self, node): for comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs))", "self.stack.append(len(comp.ifs)) # save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda:", "decorator-returning function:: class Person: pass query = (x for x in thesefy(make_subquery=False)(Person)) #", "upper = None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None", "pop())) for if_ in range(pop()): # [*] pop the length of ifs lines.append('if", "that immediately stops the iteration. 
Notice that in order to use `make_subquery` you", "_visit_generators(self, node): for comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) #", "self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join( #", "False, `thesefy` injects an ``__iter__()`` that simply returns the same object and a", "self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s:", "this if isinstance(x, self)) else: def __iter__(self): return self def next(self): raise StopIteration", "self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for", "def next(self): raise StopIteration __next__ = next from xoutil.objects import copy_class new_class =", "you can do what the LICENCE file allows you to. # '''The query", "visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step = None if", "= stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node):", "visit_Dict(self, node): # order does not really matter but I'm picky for k,", "self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args =", "query expression (more precisely a generator object) it is passed to `get_query_object`:func: along", "+= ', ' call += ', '.join('%s=%s' % (k, v) for k, v", "node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val) exprs =", "is not a query expression it must be a `query object`:term:, other types", "self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)' %", "has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in 
range(len(node.ops)) )", "node): self.stack.append(' is not ') def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self,", "self.stack.pop(-1) lines = [] for _ in range(len(node.generators)): lines.append('for %s in %s' %", "self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str", "for if_ in range(pop()): # [*] pop the length of ifs lines.append('if %s'", "__new__(cls): res = getattr(cls, 'instance', None) if not res: res = super().__new__(cls) cls.instance", "from collections import MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql import interfaces", "if res is not Unset: return res else: raise NameError(name) @memoized_property def locals(self):", "def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op)", "[self.stack.pop(-1) for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in", "self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines =", "uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass(", "visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' == ') def", "':%s' % step self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append(", "the LICENCE file allows you to. # '''The query language core. 
''' import", "pass query = (who for who in thesefy(People)) Example as a decorator:: @thesefy", "self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self, node):", "', ' call += ', '.join('%s=%s' % (k, v) for k, v in", "return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but", "def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals", "__iter__(self): return self def next(self): raise StopIteration __next__ = next from xoutil.objects import", "to use `make_subquery` you call `thesefy`:func: as a decorator-returning function:: class Person: pass", "operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node):", "self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1)", "') def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' % ')", "'%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody)", "shown above will be equivalent to:: query = (who for who in (x", "not Unset: return res else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property", "[*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = [] for _ in", "[] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs))", "if step: res += ':%s' % step self.stack.append(res) def visit_List(self, node): for elt", "stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): 
self.stack.append('not ')", "* ') def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' %", "_frame if any(name in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword argument')", "Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals)", "who in thesefy(People)) Example as a decorator:: @thesefy class People: pass query =", "are a TypeError. ''' from types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which,", "is True, then the query shown above will be equivalent to:: query =", "name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v): if isinstance(v,", "def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop =", "if keywords: if call: call += ', ' call += ', '.join('%s=%s' %", "QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst = qst self._frame", "else '' call = ', '.join(args) if keywords: if call: call += ',", "`which` is a query expression (more precisely a generator object) it is passed", "def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' != ') def", "def visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self, node): self.stack.append(' in ')", "# -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and", "it must be a `query object`:term:, other types are a TypeError. 
''' from", "', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def visit_Tuple(self, node): for", "order does not really matter but I'm picky for k, v in reversed(zip(node.keys,", "= '%s:' % lower else: res = ':' if upper: res += '%s'", "starargs: if call: call += ', ' call += '*%s' % starargs if", "== '.0': return sub_query_or_value(res) else: return res def get(self, key, default=None): res =", "node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for", "Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class", "expression=generator, **kwargs ) # Alias to the old API. these = get_query_object def", "self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def __iter__(self): return", "self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node):", "a query. 
''' def __new__(cls): res = getattr(cls, 'instance', None) if not res:", "'.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' ==", "**kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner =", "simply as a decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__', None):", "sub_query_or_value(res) else: return res def get(self, key, default=None): res = self._mapping.get(key, default) if", "'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame' def", "class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst = qst", "in keywords) if starargs: if call: call += ', ' call += '*%s'", "self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left,", "a query object. If `which` is a query expression (more precisely a generator", "node Yield') def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' !=", "= qst self._frame = _frame if any(name in RESERVED_ARGUMENTS for name in kwargs):", "in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for _ in", "def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from a predicate expression.", "% kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes", "'''Allow an object to participate in queries. 
Example as a wrapper:: class People:", "self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None,", "', ' call += '**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self,", "node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node):", "= self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1),", "% step self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]'", "kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func =", "''.join( # I assume each operator has spaces around it '%s%s' % (self.stack.pop(-1),", "None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None if node.lower:", "res = Unset if res is Unset: res = self._frame.f_globals.get(name, Unset) if res", "node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self,", "is not Unset: return res else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals", "= self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)'", "import Unset from xoutil.objects import memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta", "self.stack.append(' is not ') def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node):", "People) If `target` already support the iterable protocol (i.e implement ``__iter__``), return it", "get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): 
'''Get the query object from a query expression. '''", "# --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors # All rights reserved.", "k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s'", "TypeError('Query object expected, but object provided ' 'is not: %r' % type(which)) return", "= self._frame.f_locals.get(name, Unset) else: res = Unset if res is Unset: res =", "`this`:obj: object. The `this` object is simply a name from which objects can", "really matter but I'm picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k)", "returns the same object and a ``next()`` method that immediately stops the iteration.", "dictbody) def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1)", "Example as a wrapper:: class People: # ... pass query = (who for", "node): for elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in", "to `get_query_object`:func: along with all keyword arguments. If `which` is not a query", "orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): # order does not", "visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self,", "node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)'", "''' from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame", "call += '*%s' % starargs if kwargs: if call: call += ', '", "queries. Example as a wrapper:: class People: # ... 
pass query = (who", "Mapping): def __contains__(self, key): try: self[key] except KeyError: return False else: return True", "node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg)", "(self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step", "'%s' % upper if step: res += ':%s' % step self.stack.append(res) def visit_List(self,", "self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append('", "in reversed(node.elts): self.visit(elt) result = ( '(%s' % ', '.join( self.stack.pop(-1) for _", "for x in this if isinstance(x, People))) If `make_subquery` is False, `thesefy` injects", "the length of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node):", "__next__ = next from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return new_class", "node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node):", "= ''.join( # I assume each operator has spaces around it '%s%s' %", "upper: res += '%s' % upper if step: res += ':%s' % step", "lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield')", "from xotl.ql import interfaces class Universe: '''The class of the `this`:obj: object. 
The", "node): stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1)", "in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target)", "visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def", "= ':' if upper: res += '%s' % upper if step: res +=", "return sub_query_or_value(res) else: return res def get(self, key, default=None): res = self._mapping.get(key, default)", "if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v class", "(k, v) for k, v in keywords) if starargs: if call: call +=", "') def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' < ')", "visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]'", "core. ''' import ast import types from xoutil.symbols import Unset from xoutil.objects import", "FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a query object. If", "lower else: res = ':' if upper: res += '%s' % upper if", "Person: pass query = (x for x in thesefy(make_subquery=False)(Person)) # or simply as", "self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords =", "SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack = [] self.visit(node) assert len(stack) ==", "the same object and a ``next()`` method that immediately stops the iteration. 
Notice", "['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self,", "argument') self.expression = kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self, attr, val)", "= visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' %", "else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals", "def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' %", "meta=new_meta) return new_class class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries',", "node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node):", "stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node): self.stack.append('", "precisely a generator object) it is passed to `get_query_object`:func: along with all keyword", "query = (who for who in thesefy(People)) Example as a decorator:: @thesefy class", "False else: return True def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and", "= res return res def __getitem__(self, key): return self def __getattr__(self, name): return", "qst self._frame = _frame if any(name in RESERVED_ARGUMENTS for name in kwargs): raise", "in %s' % (pop(), pop())) for if_ in range(pop()): # [*] pop the", "the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines =", "Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType = 
import_object(frame_type", "def visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self, node): self.visit(node.left) for op,", "simply a name from which objects can be drawn in a query. '''", "= kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner =", "range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if", "be drawn in a query. ''' def __new__(cls): res = getattr(cls, 'instance', None)", "only_globals: res = self._frame.f_locals.get(name, Unset) else: res = Unset if res is Unset:", "res = '%s:' % lower else: res = ':' if upper: res +=", "node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node):", "return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject',", "visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand)", "new_class class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals", "in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and", "visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self,", "< ') def visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' >", "'' call = ', '.join(args) if keywords: if call: call += ', '", "self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) 
self.visit(node.left)", "visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right =", "[~º/~] and Contributors # All rights reserved. # # This is free software;", "software; you can do what the LICENCE file allows you to. # '''The", "def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' % ') def", "reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1))", "predicate expression. ''' from xoutil.objects import import_object from .revenge import Uncompyled uncompiled =", "self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append('", ">= ') def visit_Is(self, node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is", "in range(pop()): # [*] pop the length of ifs lines.append('if %s' % pop())", "query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object from a query expression. 
''' from", "`make_subquery` you call `thesefy`:func: as a decorator-returning function:: class Person: pass query =", "call `thesefy`:func: as a decorator-returning function:: class Person: pass query = (x for", "= kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self, attr, val) def get_value(self,", "`this` object is simply a name from which objects can be drawn in", "node.step: self.visit(node.step) step = self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper) upper", "People: pass query = (who for who in People) If `target` already support", "self)) else: def __iter__(self): return self def next(self): raise StopIteration __next__ = next", "self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines", "for who in thesefy(People)) Example as a decorator:: @thesefy class People: pass query", "class Universe: '''The class of the `this`:obj: object. The `this` object is simply", "self.qst = qst self._frame = _frame if any(name in RESERVED_ARGUMENTS for name in", "object expected, but object provided ' 'is not: %r' % type(which)) return which", "other types are a TypeError. 
''' from types import GeneratorType if isinstance(which, GeneratorType):", "types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which,", "node) return stack.pop() def visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append('", "self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1)))", "self._frame.f_locals.get(name, Unset) else: res = Unset if res is Unset: res = self._frame.f_globals.get(name,", "if call: call += ', ' call += '*%s' % starargs if kwargs:", "reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join( # I assume each operator has", "self._frame = _frame if any(name in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid", "= (x for x in thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False)", "def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords):", "memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql import", "return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the old", "') def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack = self.stack", "default) if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def", "self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def get(self, key,", "in thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False) class Person: pass '''", "@memoized_property def 
globals(self): return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst)", "self._frame.f_globals.get(name, Unset) if res is not Unset: return res else: raise NameError(name) @memoized_property", "= [self.stack.pop(-1) for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _", "+= '*%s' % starargs if kwargs: if call: call += ', ' call", "= import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator,", "' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt)", "an object to participate in queries. Example as a wrapper:: class People: #", "RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class QueryObject:", "get_source(self, node): stack = self.stack = [] self.visit(node) assert len(stack) == 1, 'Remaining", "cls.instance = res return res def __getitem__(self, key): return self def __getattr__(self, name):", "x in thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False) class Person: pass", "implement ``__iter__``), return it unchanged. 
If `make_subquery` is True, then the query shown", "visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self,", "self.visit(val) exprs = [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1)", "xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame", "node): raise NotImplementedError() def visit_Dict(self, node): # order does not really matter but", "keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1)", "visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def", "for who in People) If `target` already support the iterable protocol (i.e implement", "node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None if lower: res =", "expression. 
''' from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator)", "node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node):", "!= ') def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node): self.stack.append(' <=", "def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return", "def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result = ( '(%s' %", "len(stack) == 1, 'Remaining items %r at %r' % (stack, node) return stack.pop()", "') def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' | ')", "for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s:", "GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected,", "only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset) else: res = Unset if", "def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op =", "return res def __getitem__(self, key): return self def __getattr__(self, name): return self def", "Unset if res is Unset: res = self._frame.f_globals.get(name, Unset) if res is not", "node): self.visit(node.op) for val in node.values: self.visit(val) exprs = [] for _ in", "a query expression it must be a `query object`:term:, other types are a", "func = self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords = [", "from which objects can be drawn in a query. ''' def __new__(cls): res", "`get_query_object`:func: along with all keyword arguments. 
If `which` is not a query expression", "obj.__closure__: return { name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else:", "self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop =", "Person: pass ''' if getattr(target, '__iter__', None): return target class new_meta(type(target)): if make_subquery:", "self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators): for if_ in reversed(comp.ifs):", "return self def __getattr__(self, name): return self def __iter__(self): return self def next(self):", "import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type)", "self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key)", "def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop", "res else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def globals(self): return", "') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in node.values:", "self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append('", "node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body", "pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self, node):", "a predicate expression. 
''' from xoutil.objects import import_object from .revenge import Uncompyled uncompiled", "') def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' // ')", "+= ':%s' % step self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt)", "stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def", "if any(name in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword argument') self.expression", "in ') def visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self, node): self.visit(node.left)", "def get_source(self, node): stack = self.stack = [] self.visit(node) assert len(stack) == 1,", "return it unchanged. If `make_subquery` is True, then the query shown above will", "it is passed to `get_query_object`:func: along with all keyword arguments. If `which` is", "''' from xoutil.objects import import_object from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass", "to. # '''The query language core. ''' import ast import types from xoutil.symbols", "lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def", "**kwargs): self.qst = qst self._frame = _frame if any(name in RESERVED_ARGUMENTS for name", "PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a query", "func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a query object. If `which`", "self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def", "= self.stack.pop(-1) if node.kwargs else '' call = ', '.join(args) if keywords: if", "object from a query expression. 
''' from xoutil.objects import import_object from xotl.ql.revenge import", "in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs", "next(self): raise StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS = ( 'limit',", "self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body =", "for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs [*]", "node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value)", "spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s'", "self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs)", "gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType(", "self.visit(node) assert len(stack) == 1, 'Remaining items %r at %r' % (stack, node)", "visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right =", "call += ', '.join('%s=%s' % (k, v) for k, v in keywords) if", "RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None)", "call = ', '.join(args) if keywords: if call: call += ', ' call", "node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node):", "res return res def __getitem__(self, key): return self def __getattr__(self, name): return self", "The `this` object is simply a name from which objects can be drawn", 
"xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self,", "elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _ in", "for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _", "call += ', ' call += ', '.join('%s=%s' % (k, v) for k,", "self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return", "def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr))", "visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self,", "= lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}'", "= self._frame.f_globals.get(name, Unset) if res is not Unset: return res else: raise NameError(name)", "= generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst,", "import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs", "import ast import types from xoutil.symbols import Unset from xoutil.objects import memoized_property from", "name from which objects can be drawn in a query. 
''' def __new__(cls):", "% starargs if kwargs: if call: call += ', ' call += '**%s'", "not res: res = super().__new__(cls) cls.instance = res return res def __getitem__(self, key):", "= ', '.join(args) if keywords: if call: call += ', ' call +=", "_FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except KeyError: return False else: return", "if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def __iter__(self):", "node): stack = self.stack = [] self.visit(node) assert len(stack) == 1, 'Remaining items", "node): self.stack.append(' is ') def visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self,", "in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of", "(more precisely a generator object) it is passed to `get_query_object`:func: along with all", "_visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append('", "Mapping from xoutil.decorator.meta import decorator from xotl.ql import interfaces class Universe: '''The class", "self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)'", "visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1),", "object is simply a name from which objects can be drawn in a", "setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self,", "res = self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return", "= [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if", "% upper if step: 
res += ':%s' % step self.stack.append(res) def visit_List(self, node):", "visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def", "this if isinstance(x, People))) If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that", "self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append('", "def __getattr__(self, name): return self def __iter__(self): return self def next(self): raise StopIteration", "not ') def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not", "xoutil.symbols import Unset from xoutil.objects import memoized_property from collections import MappingView, Mapping from", "def __iter__(self): return self def next(self): raise StopIteration __next__ = next this =", "= _frame if any(name in RESERVED_ARGUMENTS for name in kwargs): raise TypeError('Invalid keyword", "protocol (i.e implement ``__iter__``), return it unchanged. If `make_subquery` is True, then the", "around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' %", "== '<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack", "iteration. 
Notice that in order to use `make_subquery` you call `thesefy`:func: as a", "res = self._frame.f_globals.get(name, Unset) if res is not Unset: return res else: raise", "arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for _", "def visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right", "%r' % type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow an object to", "comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length", "kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes =", "attr, val in kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False): if not", ") self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody", "range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs =", "If `which` is a query expression (more precisely a generator object) it is", "return stack.pop() def visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or", "assume each operator has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _", "objects can be drawn in a query. ''' def __new__(cls): res = getattr(cls,", "node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self,", "method that immediately stops the iteration. Notice that in order to use `make_subquery`", "This is free software; you can do what the LICENCE file allows you", "class of the `this`:obj: object. 
The `this` object is simply a name from", "visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else:", "def normalize_query(which, **kwargs): '''Ensure a query object. If `which` is a query expression", "a name from which objects can be drawn in a query. ''' def", "you to. # '''The query language core. ''' import ast import types from", "self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s%s)' %", "Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return", "% op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left =", ") def normalize_query(which, **kwargs): '''Ensure a query object. If `which` is a query", "visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): # order does not really matter", "= (who for who in People) If `target` already support the iterable protocol", "% (left, op, right)) def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node):", "visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self, node): self.visit(node.left) for op, expr", "in ') def visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr)", "import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType =", "name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def", "make_subquery=True): '''Allow an object to participate in queries. Example as a wrapper:: class", "query object. 
If `which` is a query expression (more precisely a generator object)", "self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self,", "class People: # ... pass query = (who for who in thesefy(People)) Example", "res = ':' if upper: res += '%s' % upper if step: res", "reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody)", "self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in", "Unset from xoutil.objects import memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta import", "copy_class new_class = copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self, locals, globals,", "FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the old API. 
these =", "not really matter but I'm picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v)", "NotImplementedError() def visit_Dict(self, node): # order does not really matter but I'm picky", "sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType)", "pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self,", "self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self,", "an ``__iter__()`` that simply returns the same object and a ``next()`` method that", "= self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return", "'''The class of the `this`:obj: object. The `this` object is simply a name", "self.visit(node.step) step = self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper) upper =", "Unset: return res else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def", "op, right)) def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' -", "if lower: res = '%s:' % lower else: res = ':' if upper:", "@thesefy class People: pass query = (who for who in People) If `target`", "v in keywords) if starargs: if call: call += ', ' call +=", "self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self,", "for name in kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for", "] starargs = self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs", "def 
visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or ') def", "# save the length of ifs [*] self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1)", "(pop(), pop())) for if_ in range(pop()): # [*] pop the length of ifs", ") class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst =", "__iter__(self): return self def next(self): raise StopIteration __next__ = next this = Universe()", "% (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def", "'_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst", "Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func),", "node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1)", "super().__new__(cls) cls.instance = res return res def __getitem__(self, key): return self def __getattr__(self,", "for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt", "object provided ' 'is not: %r' % type(which)) return which @decorator def thesefy(target,", "self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack = self.stack", "``__iter__``), return it unchanged. 
If `make_subquery` is True, then the query shown above", "for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v): if", "key == '.0': return sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping) def", "globals(self): return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator,", "FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs )", "new_class = copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self, locals, globals, **kwargs):", "__init__(self, qst, _frame, **kwargs): self.qst = qst self._frame = _frame if any(name in", "(self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else", "import copy_class new_class = copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self, locals,", "for attr, val in kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False): if", "these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from", "return res else: raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def globals(self):", "and Contributors # All rights reserved. 
# # This is free software; you", "'.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop", "keywords) if starargs: if call: call += ', ' call += '*%s' %", "self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView,", "', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node)", "in kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for attr, val", "= self.stack.pop(-1) else: lower = None if lower: res = '%s:' % lower", "% ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node):", "and ') def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def", "'.0': return sub_query_or_value(res) else: return res def get(self, key, default=None): res = self._mapping.get(key,", "`make_subquery` is False, `thesefy` injects an ``__iter__()`` that simply returns the same object", "res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else:", "_FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self, key): try:", "return self def next(self): raise StopIteration __next__ = next from xoutil.objects import copy_class", "') def visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' > ')", "[] self.visit(node) assert len(stack) == 1, 'Remaining items %r at %r' % (stack,", "StopIteration __next__ = next from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return", "PredicateClass = 
import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__),", "_ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else '' kwargs =", "object and a ``next()`` method that immediately stops the iteration. Notice that in", "node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' %", "for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else '' kwargs", "self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper", "for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' %", "self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack", "node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node):", "def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s: %s'", "<NAME> [~º/~] and Contributors # All rights reserved. 
# # This is free", "lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines))", "pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' %", "__getitem__(self, key): return self def __getattr__(self, name): return self def __iter__(self): return self", "query expression it must be a `query object`:term:, other types are a TypeError.", "(who for who in thesefy(People)) Example as a decorator:: @thesefy class People: pass", "starargs = self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs else", "key): try: self[key] except KeyError: return False else: return True def __getitem__(self, key):", "Alias to the old API. these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs):", "from a predicate expression. ''' from xoutil.objects import import_object from .revenge import Uncompyled", "TypeError. ''' from types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else:", "key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0': return", "from types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not", "expression it must be a `query object`:term:, other types are a TypeError. 
'''", "self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs else '' call", "if len(node.elts) == 1: result += ', )' else: result += ')' self.stack.append(result)", "visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s'", "length of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise", "thesefy(target, make_subquery=True): '''Allow an object to participate in queries. Example as a wrapper::", "| ') def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^", "% self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()]", "who in People) If `target` already support the iterable protocol (i.e implement ``__iter__``),", "keyword arguments. If `which` is not a query expression it must be a", "= self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs else ''", "def visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val) exprs = [] for", "not in ') def visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops, node.comparators)):", "visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body)", "a decorator-returning function:: class Person: pass query = (x for x in thesefy(make_subquery=False)(Person))", "is Unset: res = self._frame.f_globals.get(name, Unset) if res is not Unset: return res", "= ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node):", "in range(len(node.elts)) ) ) if len(node.elts) == 1: result += ', )' else:", "expr in reversed(zip(node.ops, node.comparators)): 
self.visit(expr) self.visit(op) right = ''.join( # I assume each", "get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but object", "= _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self, key):", "from xoutil.symbols import Unset from xoutil.objects import memoized_property from collections import MappingView, Mapping", "in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1)", "= import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func,", "order to use `make_subquery` you call `thesefy`:func: as a decorator-returning function:: class Person:", "builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query", "else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack = []", ") self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs:", "-*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors # All rights", "# This is free software; you can do what the LICENCE file allows", "def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self, node): self.stack.append(' is ') def", "res = super().__new__(cls) cls.instance = res return res def __getitem__(self, key): return self", "step = self.stack.pop(-1) else: step = None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1)", "'groups', 'order', 'get_value', 'qst', '_frame' ) class 
QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self,", "``next()`` method that immediately stops the iteration. Notice that in order to use", "'.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' %", "in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs)", "support the iterable protocol (i.e implement ``__iter__``), return it unchanged. If `make_subquery` is", "%r' % (stack, node) return stack.pop() def visit_And(self, node): self.stack.append(' and ') def", "node): self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node):", "from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return new_class class Frame: def", "= self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right)", "call += '**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r'", "= ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class QueryObject: frame_type", "= copy_class(target, meta=new_meta) return new_class class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries", "= get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from a", "'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame'", "def get_value(self, name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset) else: res", "def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' & ') def", "a ``next()`` method that immediately stops the iteration. 
Notice that in order to", "node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node):", "self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step:", "self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append('", "does not really matter but I'm picky for k, v in reversed(zip(node.keys, node.values)):", "name in kwargs): raise TypeError('Invalid keyword argument') self.expression = kwargs.pop('expression', None) for attr,", "self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def", "= self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op =", "') def visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' * ')", "call: call += ', ' call += '*%s' % starargs if kwargs: if", "uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to the old API. these", "''' def __new__(cls): res = getattr(cls, 'instance', None) if not res: res =", "(x for x in this if isinstance(x, People))) If `make_subquery` is False, `thesefy`", "step = None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None", "the query object from a query expression. 
''' from xoutil.objects import import_object from", "res += '%s' % upper if step: res += ':%s' % step self.stack.append(res)", "import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs", "call += ', ' call += '*%s' % starargs if kwargs: if call:", "name): return self def __iter__(self): return self def next(self): raise StopIteration __next__ =", "assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for name, cell in", "import decorator from xotl.ql import interfaces class Universe: '''The class of the `this`:obj:", "return target class new_meta(type(target)): if make_subquery: def __iter__(self): return (x for x in", "', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}'", "self.stack.pop(-1) else: lower = None if lower: res = '%s:' % lower else:", "class People: pass query = (who for who in People) If `target` already", "pass ''' if getattr(target, '__iter__', None): return target class new_meta(type(target)): if make_subquery: def", "op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join( # I assume", "self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body, test, orelse))", "make_subquery: def __iter__(self): return (x for x in this if isinstance(x, self)) else:", ") ) def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result = (", "visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >= ') def visit_Is(self,", "right)) def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' - ')", "node.comparators)): self.visit(expr) self.visit(op) right = ''.join( # I assume each operator has spaces", "def 
visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1)", "if kwargs: if call: call += ', ' call += '**%s' % kwargs", "return self def next(self): raise StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS", "for _ in range(len(node.elts)) ) ) if len(node.elts) == 1: result += ',", "expression (more precisely a generator object) it is passed to `get_query_object`:func: along with", "= Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' )", "% self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines =", "= import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) #", "= (who for who in (x for x in this if isinstance(x, People)))", "the query shown above will be equivalent to:: query = (who for who", "body = self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else", "but I'm picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody =", "% ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if len(node.elts) ==", "( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame' ) class QueryObject: frame_type =", "assert len(stack) == 1, 'Remaining items %r at %r' % (stack, node) return", "raise NotImplementedError() def visit_Dict(self, node): # order does not really matter but I'm", "'''Ensure a query object. 
If `which` is a query expression (more precisely a", "lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in", "Notice that in order to use `make_subquery` you call `thesefy`:func: as a decorator-returning", "not a query expression it must be a `query object`:term:, other types are", "def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not in ')", "self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand))", "API. these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object", "xoutil.objects import import_object from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type)", "self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step =", "% (k, v) for k, v in keywords) if starargs: if call: call", "__next__ = next this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order',", "visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self,", "which objects can be drawn in a query. ''' def __new__(cls): res =", "object. If `which` is a query expression (more precisely a generator object) it", "node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self, node):", "language core. 
''' import ast import types from xoutil.symbols import Unset from xoutil.objects", "node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for", "self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1)", "from .revenge import Uncompyled uncompiled = Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type", "import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias", "if call: call += ', ' call += '**%s' % kwargs self.stack.append('%s(%s)' %", "pass query = (who for who in People) If `target` already support the", ") if len(node.elts) == 1: result += ', )' else: result += ')'", "x in this if isinstance(x, People))) If `make_subquery` is False, `thesefy` injects an", "python3 # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~]", "= None if lower: res = '%s:' % lower else: res = ':'", "if make_subquery: def __iter__(self): return (x for x in this if isinstance(x, self))", "**kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but object provided", "isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars,", "node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node):", "@memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs):", "True def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key == 
'.0':", "op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack = self.stack self.visit(node.op)", "self def next(self): raise StopIteration __next__ = next from xoutil.objects import copy_class new_class", "self.visit(k) dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in", "in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts))", "in this if isinstance(x, self)) else: def __iter__(self): return self def next(self): raise", "then the query shown above will be equivalent to:: query = (who for", "frame_type=None, **kwargs): '''Get the query object from a query expression. ''' from xoutil.objects", "class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except KeyError: return False else:", "import MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql import interfaces class Universe:", "must be a `query object`:term:, other types are a TypeError. ''' from types", "if isinstance(x, People))) If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that simply", "class Person: pass ''' if getattr(target, '__iter__', None): return target class new_meta(type(target)): if", "in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else '' kwargs = self.stack.pop(-1)", "self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val", "else: res = Unset if res is Unset: res = self._frame.f_globals.get(name, Unset) if", "self def next(self): raise StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS =", "normalize_query(which, **kwargs): '''Ensure a query object. 
If `which` is a query expression (more", "elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}'", "Unset) else: res = Unset if res is Unset: res = self._frame.f_globals.get(name, Unset)", "node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node):", "return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a", "sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v", "self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node): self.stack.append('", "[pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators): for", "step self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' %", "keywords: if call: call += ', ' call += ', '.join('%s=%s' % (k,", "% (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for", "raise StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset',", "and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self,", "node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError def", "def __contains__(self, key): try: self[key] except KeyError: return False else: return True def", "res = getattr(cls, 'instance', None) if not res: res = super().__new__(cls) cls.instance =", "a query expression. 
''' from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled", "self.visit(elt) result = ( '(%s' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts))", "= [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp in reversed(node.generators):", "pop the length of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self,", "TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append('", "return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v)", "'''The query language core. ''' import ast import types from xoutil.symbols import Unset", "ast import types from xoutil.symbols import Unset from xoutil.objects import memoized_property from collections", "iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for", "if obj.__closure__: return { name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__) }", "self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt in", "and key == '.0': return sub_query_or_value(res) else: return res def get(self, key, default=None):", "ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node", "orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body, test, orelse)) def", "__contains__(self, key): try: self[key] except KeyError: return False else: return True def __getitem__(self,", "visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self,", "for k, v in keywords) if starargs: if call: call += ', '", "@memoized_property def 
locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def source(self):", "get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack =", "res += ':%s' % step self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts):", "a predicate object from a predicate expression. ''' from xoutil.objects import import_object from", "self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower) lower =", "old API. these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate", "as a wrapper:: class People: # ... pass query = (who for who", "range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self,", "but object provided ' 'is not: %r' % type(which)) return which @decorator def", "stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self,", "% type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow an object to participate", "node): raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self,", "of the `this`:obj: object. The `this` object is simply a name from which", "self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node)", "node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs else '' call = ',", "items %r at %r' % (stack, node) return stack.pop() def visit_And(self, node): self.stack.append('", "can do what the LICENCE file allows you to. 
# '''The query language", "% pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield') def visit_Eq(self,", "picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join(", "visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError", "in this if isinstance(x, People))) If `make_subquery` is False, `thesefy` injects an ``__iter__()``", "'*%s' % starargs if kwargs: if call: call += ', ' call +=", "# I assume each operator has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1))", "@decorator def thesefy(target, make_subquery=True): '''Allow an object to participate in queries. Example as", "target class new_meta(type(target)): if make_subquery: def __iter__(self): return (x for x in this", "node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node):", "lines.append('for %s in %s' % (pop(), pop())) for if_ in range(pop()): # [*]", "(body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): # order", "+= '**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node): self.stack.append('%r' %", "def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >> ') def", "') def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' ** ')", "node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self,", "a `query object`:term:, other types are a TypeError. 
''' from types import GeneratorType", "query = (x for x in thesefy(make_subquery=False)(Person)) # or simply as a decorator", "node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not in ') def visit_Compare(self,", "% (func, call)) def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def", "predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a query object. If `which` is", "v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody = ', '.join( '%s: %s' %", "decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__', None): return target class", "node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1)", "import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which,", "self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self,", "in thesefy(People)) Example as a decorator:: @thesefy class People: pass query = (who", "'[%s]' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def visit_Tuple(self,", "I assume each operator has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for", "val in node.values: self.visit(val) exprs = [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1))", "if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res def get(self,", "visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self,", "self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody =", "<= ') 
def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append(' >=", "- ') def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' /", "in queries. Example as a wrapper:: class People: # ... pass query =", "as a decorator-returning function:: class Person: pass query = (x for x in", "free software; you can do what the LICENCE file allows you to. #", "= None if node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None if", "% (body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): #", "source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the", "/ ') def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' **", "(self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node):", "interfaces.QueryObject): raise TypeError('Query object expected, but object provided ' 'is not: %r' %", "get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from a predicate", "for who in (x for x in this if isinstance(x, People))) If `make_subquery`", ">> ') def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' &", "def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self, node): self.stack.append(' ** ') def", "from xoutil.objects import import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame =", "% dictbody) def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody = ',", "<< ') def visit_RShift(self, node): self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' 
|", "visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result = ( '(%s' % ',", "def __init__(self, qst, _frame, **kwargs): self.qst = qst self._frame = _frame if any(name", "pop = lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop())", "= stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node):", "cell in zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType)", "'.0': return sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert", "'.join('%s=%s' % (k, v) for k, v in keywords) if starargs: if call:", "visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self,", "is a query expression (more precisely a generator object) it is passed to", "next from xoutil.objects import copy_class new_class = copy_class(target, meta=new_meta) return new_class class Frame:", "= 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst = qst self._frame = _frame", "query shown above will be equivalent to:: query = (who for who in", "each operator has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in", "zip(obj.__code__.co_freevars, obj.__closure__) } else: return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name", "'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst = qst self._frame = _frame if", "= self class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except KeyError: return", "decorator from xotl.ql import interfaces class Universe: '''The class of the `this`:obj: object.", "`make_subquery` is True, then the query shown above will be equivalent to:: query", "if not isinstance(which, 
interfaces.QueryObject): raise TypeError('Query object expected, but object provided ' 'is", "'.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt", "a decorator:: @thesefy class People: pass query = (who for who in People)", "self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val) exprs = []", "** ') def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >>", "if isinstance(x, self)) else: def __iter__(self): return self def next(self): raise StopIteration __next__", "Copyright (c) <NAME> [~º/~] and Contributors # All rights reserved. # # This", "= Uncompyled(func) PredicateClass = import_object(predicate_type) FrameClass = import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst,", "type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow an object to participate in", "not: %r' % type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow an object", "exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node): stack =", "[ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs", "'is not: %r' % type(which)) return which @decorator def thesefy(target, make_subquery=True): '''Allow an", "in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def visit_BinOp(self, node):", "operand = stack.pop(-1) op = stack.pop(-1) stack.append('(%s%s)' % (op, operand)) def visit_Invert(self, node):", "':' if upper: res += '%s' % upper if step: res += ':%s'", "for _ in range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop())) for if_", "# order does not really matter but I'm picky for k, v in", "node): 
self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test =", "% (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step)", "def visit_BinOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right", "%s else %s)' % (body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def", "% (op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def", "return new_class class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True)", "self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append('", "node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node):", "query = (who for who in (x for x in this if isinstance(x,", "def globals(self): return self._frame.f_globals @memoized_property def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def", "', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if len(node.elts) == 1:", "and key == '.0': return sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping)", "is simply a name from which objects can be drawn in a query.", "node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step = None if node.upper:", "types are a TypeError. 
''' from types import GeneratorType if isinstance(which, GeneratorType): return", "val in kwargs.items(): setattr(self, attr, val) def get_value(self, name, only_globals=False): if not only_globals:", "// ') def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self, node): stack =", "a wrapper:: class People: # ... pass query = (who for who in", "self.stack = [] self.visit(node) assert len(stack) == 1, 'Remaining items %r at %r'", "if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query", "visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda:", "def visit_NotEq(self, node): self.stack.append(' != ') def visit_Lt(self, node): self.stack.append(' < ') def", "self.visit(node.op) for val in node.values: self.visit(val) exprs = [] for _ in range(len(node.values)):", "True, then the query shown above will be equivalent to:: query = (who", "if upper: res += '%s' % upper if step: res += ':%s' %", "Unset) if res is not Unset: return res else: raise NameError(name) @memoized_property def", "__iter__(self): return (x for x in this if isinstance(x, self)) else: def __iter__(self):", "if getattr(target, '__iter__', None): return target class new_meta(type(target)): if make_subquery: def __iter__(self): return", "if not only_globals: res = self._frame.f_locals.get(name, Unset) else: res = Unset if res", "or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals), expression=generator, **kwargs ) # Alias to", "') def visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self, node): self.stack.append(' in", "for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if", "self.stack.append(' not in ') def 
visit_Compare(self, node): self.visit(node.left) for op, expr in reversed(zip(node.ops,", "stack = self.stack = [] self.visit(node) assert len(stack) == 1, 'Remaining items %r", "self.stack.append(' >> ') def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append('", "visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1))", "= super().__new__(cls) cls.instance = res return res def __getitem__(self, key): return self def", "') def visit_Lt(self, node): self.stack.append(' < ') def visit_LtE(self, node): self.stack.append(' <= ')", "+ ') def visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' *", "visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' % node.n) def visit_UnaryOp(self,", "operator has spaces around it '%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops))", "reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg) for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args", "the old API. 
these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a", "def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' // ') def", "} else: return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>':", "import_object from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType =", "self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for comp", "for op, expr in reversed(zip(node.ops, node.comparators)): self.visit(expr) self.visit(op) right = ''.join( # I", "get_value(self, name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset) else: res =", "'__iter__', None): return target class new_meta(type(target)): if make_subquery: def __iter__(self): return (x for", "def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for", "xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType", "else: lower = None if lower: res = '%s:' % lower else: res", "') def visit_LShift(self, node): self.stack.append(' << ') def visit_RShift(self, node): self.stack.append(' >> ')", "self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())] self._visit_generators(node)", "[] for _ in range(len(node.generators)): lines.append('for %s in %s' % (pop(), pop())) for", "self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key]", "for _ in range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt in reversed(node.elts):", "new_meta(type(target)): if make_subquery: def 
__iter__(self): return (x for x in this if isinstance(x,", "raise TypeError('Invalid node Yield') def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node):", "def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1)", "object. The `this` object is simply a name from which objects can be", "def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...')", "from xoutil.decorator.meta import decorator from xotl.ql import interfaces class Universe: '''The class of", "return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack", "try: self[key] except KeyError: return False else: return True def __getitem__(self, key): res", "return sub_query_or_value(res) else: return res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj,", "len(node.elts) == 1: result += ', )' else: result += ')' self.stack.append(result) del", "(x for x in thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False) class", "node): raise NotImplementedError def visit_Attribute(self, node): self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self,", "expression. 
''' from xoutil.objects import import_object from .revenge import Uncompyled uncompiled = Uncompyled(func)", "self.visit(node.value) self.stack.append('%s.%s' % (self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1),", "node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node):", "as a decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__', None): return", "% (stack, node) return stack.pop() def visit_And(self, node): self.stack.append(' and ') def visit_Or(self,", "_ in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt in", "self.visit(elt) self.stack.append( '[%s]' % ', '.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) )", "kwargs: if call: call += ', ' call += '**%s' % kwargs self.stack.append('%s(%s)'", "locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def source(self): builder =", "self.visit(node.lower) lower = self.stack.pop(-1) else: lower = None if lower: res = '%s:'", "x in this if isinstance(x, self)) else: def __iter__(self): return self def next(self):", "visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val) exprs", "') def visit_In(self, node): self.stack.append(' in ') def visit_NotIn(self, node): self.stack.append(' not in", "res is not Unset: return res else: raise NameError(name) @memoized_property def locals(self): return", "visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self,", "+= ', ' call += '*%s' % starargs if kwargs: if call: call", "(op, operand)) def visit_Invert(self, node): self.stack.append('~') def visit_Not(self, node): self.stack.append('not ') def 
visit_UAdd(self,", "+= ', ' call += '**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def", "return False else: return True def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries", "raise TypeError('Query object expected, but object provided ' 'is not: %r' % type(which))", "self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node):", "what the LICENCE file allows you to. # '''The query language core. '''", "'%s%s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right))", "a decorator @thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__', None): return target", "else: return res def get(self, key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries", "if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the length of ifs [*] self.visit(comp.iter)", "node): self.stack.append(' < ') def visit_LtE(self, node): self.stack.append(' <= ') def visit_Gt(self, node):", "None) if not res: res = super().__new__(cls) cls.instance = res return res def", "visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1) op = stack.pop(-1)", "for arg in reversed(node.args): self.visit(arg) self.visit(node.func) func = self.stack.pop(-1) args = [self.stack.pop(-1) for", "kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self", "already support the iterable protocol (i.e implement ``__iter__``), return it unchanged. 
If `make_subquery`", "that simply returns the same object and a ``next()`` method that immediately stops", "= _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner = self.f_globals.owner = self class _FrameView(MappingView, Mapping):", "wrapper:: class People: # ... pass query = (who for who in thesefy(People))", "raise NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property", "range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt in reversed(node.elts): self.visit(elt)", "return True def __getitem__(self, key): res = self._mapping[key] if self.owner.auto_expand_subqueries and key ==", "' call += ', '.join('%s=%s' % (k, v) for k, v in keywords)", "People: # ... pass query = (who for who in thesefy(People)) Example as", "else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object expected, but object provided '", "be equivalent to:: query = (who for who in (x for x in", "call += ', ' call += '**%s' % kwargs self.stack.append('%s(%s)' % (func, call))", "_ in range(len(node.ops)) ) self.stack.append('%s%s' % (self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs:", "# All rights reserved. # # This is free software; you can do", "Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return", "visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value)", "= self.f_globals.owner = self class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except", "_ in range(len(node.elts)) ) ) if len(node.elts) == 1: result += ', )'", "the `this`:obj: object. 
The `this` object is simply a name from which objects", "') def visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' / ')", "''' from types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if", "--------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors # All rights reserved. #", "== 1, 'Remaining items %r at %r' % (stack, node) return stack.pop() def", "self.visit(comp.iter) self.visit(comp.target) pop = lambda: self.stack.pop(-1) lines = [] for _ in range(len(node.generators)):", "call: call += ', ' call += ', '.join('%s=%s' % (k, v) for", "(stack, node) return stack.pop() def visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node):", "%s in %s' % (pop(), pop())) for if_ in range(pop()): # [*] pop", "(self.stack.pop(-1), right)) def visit_Call(self, node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw", "def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' % node.n) def", "def __new__(cls): res = getattr(cls, 'instance', None) if not res: res = super().__new__(cls)", "def __iter__(self): return self def next(self): raise StopIteration __next__ = next from xoutil.objects", "drawn in a query. 
''' def __new__(cls): res = getattr(cls, 'instance', None) if", "self def __iter__(self): return self def next(self): raise StopIteration __next__ = next this", "res is Unset: res = self._frame.f_globals.get(name, Unset) if res is not Unset: return", "(x for x in this if isinstance(x, self)) else: def __iter__(self): return self", "def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for name,", "self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s", "class new_meta(type(target)): if make_subquery: def __iter__(self): return (x for x in this if", "equivalent to:: query = (who for who in (x for x in this", "(left, op, right)) def visit_Add(self, node): self.stack.append(' + ') def visit_Sub(self, node): self.stack.append('", "rights reserved. # # This is free software; you can do what the", "visit_DictComp(self, node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s: %s' %", "upper if step: res += ':%s' % step self.stack.append(res) def visit_List(self, node): for", "node): self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(),", "stack.pop(-1) stack.append('(%s%s%s)' % (left, op, right)) def visit_Add(self, node): self.stack.append(' + ') def", "return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return { name: cell.cell_contents", "'qst', '_frame' ) class QueryObject: frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs):", "node.upper: self.visit(node.upper) upper = self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower) lower", "immediately stops the iteration. 
Notice that in order to use `make_subquery` you call", "node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step = self.stack.pop(-1) else: step", "') def visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self,", "of ifs lines.append('if %s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid", "in reversed(node.elts): self.visit(elt) setbody = ', '.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' %", "range(pop()): # [*] pop the length of ifs lines.append('if %s' % pop()) self.stack.append('", "def visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append(' * ') def", "'.join( self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if len(node.elts) == 1: result", "for x in thesefy(make_subquery=False)(Person)) # or simply as a decorator @thesefy(make_subquery=False) class Person:", "visit_Or(self, node): self.stack.append(' or ') def visit_Name(self, node): self.stack.append(node.id) def visit_BoolOp(self, node): self.visit(node.op)", "which @decorator def thesefy(target, make_subquery=True): '''Allow an object to participate in queries. Example", "self.stack.append(' + ') def visit_Sub(self, node): self.stack.append(' - ') def visit_Mult(self, node): self.stack.append('", "self class _FrameView(MappingView, Mapping): def __contains__(self, key): try: self[key] except KeyError: return False", "%s' % pop()) self.stack.append(' '.join(lines)) def visit_Yield(self, node): raise TypeError('Invalid node Yield') def", "'.join(self.stack.pop(-1) for _ in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]'", "# ... 
pass query = (who for who in thesefy(People)) Example as a", "(pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)'", "def visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^ ') def", "self.stack.pop(-1) else: upper = None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower", "+= ', '.join('%s=%s' % (k, v) for k, v in keywords) if starargs:", "_frame, **kwargs): self.qst = qst self._frame = _frame if any(name in RESERVED_ARGUMENTS for", "class Frame: def __init__(self, locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals =", "to:: query = (who for who in (x for x in this if", "node): if node.kwargs: self.visit(node.kwargs) if node.starargs: self.visit(node.starargs) for kw in reversed(node.keywords): self.visit(kw.value) self.stack.append(kw.arg)", "self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def visit_GeneratorExp(self, node): self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1))", "globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals) self.f_locals.owner", "self.stack.pop(-1) args = [self.stack.pop(-1) for _ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1))", "uncompiled.qst, FrameClass(_get_closure(func), func.__globals__), predicate=func, **kwargs ) def normalize_query(which, **kwargs): '''Ensure a query object.", "xoutil.objects import memoized_property from collections import MappingView, Mapping from xoutil.decorator.meta import decorator from", "= import_object(frame_type or PredicateClass.frame_type) return PredicateClass( uncompiled.qst, FrameClass(_get_closure(func), 
func.__globals__), predicate=func, **kwargs ) def", "% setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node)", "') def visit_Pow(self, node): self.stack.append(' ** ') def visit_LShift(self, node): self.stack.append(' << ')", "for comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save the", "self.stack.append(res) def visit_List(self, node): for elt in reversed(node.elts): self.visit(elt) self.stack.append( '[%s]' % ',", "if res is Unset: res = self._frame.f_globals.get(name, Unset) if res is not Unset:", "def next(self): raise StopIteration __next__ = next this = Universe() RESERVED_ARGUMENTS = (", "' call += '**%s' % kwargs self.stack.append('%s(%s)' % (func, call)) def visit_Str(self, node):", "lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node): for", "use `make_subquery` you call `thesefy`:func: as a decorator-returning function:: class Person: pass query", "def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return {", "lower: res = '%s:' % lower else: res = ':' if upper: res", "node): # order does not really matter but I'm picky for k, v", "res def get(self, key, default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key", "self.stack.pop(-1) for _ in range(len(node.elts)) ) ) if len(node.elts) == 1: result +=", "= [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' %", "range(len(node.elts)) ) ) if len(node.elts) == 1: result += ', )' else: result", "visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) 
self.visit(node.body) body = self.stack.pop(-1) test", "', '.join(args) if keywords: if call: call += ', ' call += ',", "key): return self def __getattr__(self, name): return self def __iter__(self): return self def", "%s' % (pop(), pop())) for if_ in range(pop()): # [*] pop the length", "v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor): def get_source(self, node):", "if node.kwargs else '' call = ', '.join(args) if keywords: if call: call", "' call += '*%s' % starargs if kwargs: if call: call += ',", "self.stack.pop(-1)) for _ in range(len(node.keywords)) ] starargs = self.stack.pop(-1) if node.starargs else ''", "self.visit(expr) self.visit(op) right = ''.join( # I assume each operator has spaces around", "def source(self): builder = SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get", "visit_IsNot(self, node): self.stack.append(' is not ') def visit_In(self, node): self.stack.append(' in ') def", "def locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def source(self): builder", "MappingView, Mapping from xoutil.decorator.meta import decorator from xotl.ql import interfaces class Universe: '''The", "not only_globals: res = self._frame.f_locals.get(name, Unset) else: res = Unset if res is", "the iteration. Notice that in order to use `make_subquery` you call `thesefy`:func: as", "def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return", "be a `query object`:term:, other types are a TypeError. ''' from types import", "for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)' % op.join(exprs)) def", "**kwargs ) # Alias to the old API. 
these = get_query_object def get_predicate_object(func,", "self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if", "obj.__closure__) } else: return {} def sub_query_or_value(v): if isinstance(v, types.GeneratorType) and v.gi_code.co_name ==", "% node.n) def visit_UnaryOp(self, node): stack = self.stack self.visit(node.op) self.visit(node.operand) operand = stack.pop(-1)", "is free software; you can do what the LICENCE file allows you to.", "frame_type=None, **kwargs): '''Get a predicate object from a predicate expression. ''' from xoutil.objects", "= lambda: self.stack.pop(-1) lines = [pop()] self._visit_generators(node) lines.append(pop()) self.stack.append(' '.join(lines)) def _visit_generators(self, node):", "= lambda: self.stack.pop(-1) lines = [] for _ in range(len(node.generators)): lines.append('for %s in", "default=None): res = self._mapping.get(key, default) if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res)", "self.visit(node.value) self.visit(node.key) pop = lambda: self.stack.pop(-1) lines = ['%s: %s' % (pop(), pop())]", "setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}'", "(self.stack.pop(-1), node.attr)) def visit_Subscript(self, node): self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self,", "self.stack.append('not ') def visit_UAdd(self, node): self.stack.append('+') def visit_USub(self, node): self.stack.append('-') def visit_IfExp(self, node):", "thesefy(People)) Example as a decorator:: @thesefy class People: pass query = (who for", "in range(len(node.keys)) ) self.stack.append('{%s}' % dictbody) def visit_Set(self, node): for elt in reversed(node.elts):", "class Person: pass query = (x for x in 
thesefy(make_subquery=False)(Person)) # or simply", "class SourceBuilder(ast.NodeVisitor): def get_source(self, node): stack = self.stack = [] self.visit(node) assert len(stack)", "node): for comp in reversed(node.generators): for if_ in reversed(comp.ifs): self.visit(if_) self.stack.append(len(comp.ifs)) # save", "res def __getitem__(self, key): return self def __getattr__(self, name): return self def __iter__(self):", "frame_type = 'xotl.ql.core.Frame' def __init__(self, qst, _frame, **kwargs): self.qst = qst self._frame =", "# '''The query language core. ''' import ast import types from xoutil.symbols import", "== 1: result += ', )' else: result += ')' self.stack.append(result) del decorator", "self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s'", "self.visit(op) right = ''.join( # I assume each operator has spaces around it", "lower = self.stack.pop(-1) else: lower = None if lower: res = '%s:' %", "SourceBuilder() return builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object from", "Yield') def visit_Eq(self, node): self.stack.append(' == ') def visit_NotEq(self, node): self.stack.append(' != ')", "^ ') def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self, node): self.stack.append('%s' %", "= self._mapping[key] if self.owner.auto_expand_subqueries and key == '.0': return sub_query_or_value(res) else: return res", "self def __getattr__(self, name): return self def __iter__(self): return self def next(self): raise", "self.visit(node.slice) self.visit(node.value) self.stack.append('%s[%s]' % (self.stack.pop(-1), self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node):", "else: upper = None if node.lower: self.visit(node.lower) lower = self.stack.pop(-1) else: lower =", "else: return {} def sub_query_or_value(v): if 
isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return", "in order to use `make_subquery` you call `thesefy`:func: as a decorator-returning function:: class", "', ' call += '*%s' % starargs if kwargs: if call: call +=", "test = self.stack.pop(-1) orelse = self.stack.pop(-1) self.stack.append('(%s if %s else %s)' % (body,", "decorator:: @thesefy class People: pass query = (who for who in People) If", "If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that simply returns the same", "will be equivalent to:: query = (who for who in (x for x", "self.stack.pop(-1))) def visit_Ellipsis(self, node): self.stack.append('...') def visit_Slice(self, node): if node.step: self.visit(node.step) step =", "stops the iteration. Notice that in order to use `make_subquery` you call `thesefy`:func:", "visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self, node): self.stack.append(' // ') def visit_Num(self,", "else: return res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if", "res def __iter__(self): return iter(self._mapping) def _get_closure(obj): assert isinstance(obj, types.FunctionType) if obj.__closure__: return", "(c) <NAME> [~º/~] and Contributors # All rights reserved. # # This is", "'Remaining items %r at %r' % (stack, node) return stack.pop() def visit_And(self, node):", "__getattr__(self, name): return self def __iter__(self): return self def next(self): raise StopIteration __next__", "# Alias to the old API. 
these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None,", "getattr(target, '__iter__', None): return target class new_meta(type(target)): if make_subquery: def __iter__(self): return (x", "builder.get_source(self.qst) def get_query_object(generator, query_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get the query object from a query", "% (pop(), pop())) for if_ in range(pop()): # [*] pop the length of", "**kwargs): '''Get a predicate object from a predicate expression. ''' from xoutil.objects import", "from xotl.ql.revenge import Uncompyled uncompiled = Uncompyled(generator) gi_frame = generator.gi_frame QueryObjectType = import_object(query_type)", "in range(len(node.elts))) self.stack.append('{%s}' % setbody) def visit_ListComp(self, node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def", "@thesefy(make_subquery=False) class Person: pass ''' if getattr(target, '__iter__', None): return target class new_meta(type(target)):", "visit_BoolOp(self, node): self.visit(node.op) for val in node.values: self.visit(val) exprs = [] for _", "1, 'Remaining items %r at %r' % (stack, node) return stack.pop() def visit_And(self,", "locals, globals, **kwargs): self.auto_expand_subqueries = kwargs.pop('auto_expand_subqueries', True) self.f_locals = _FrameView(locals) self.f_globals = _FrameView(globals)", ") ) if len(node.elts) == 1: result += ', )' else: result +=", "node): self._visit_comp(node) self.stack.append('[%s]' % self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def", "(who for who in (x for x in this if isinstance(x, People))) If", "def visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse", "file allows you to. # '''The query language core. 
''' import ast import", "get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get a predicate object from a predicate expression. '''", "to the old API. these = get_query_object def get_predicate_object(func, predicate_type='xotl.ql.core.QueryObject', frame_type=None, **kwargs): '''Get", "return (x for x in this if isinstance(x, self)) else: def __iter__(self): return", "query = (who for who in People) If `target` already support the iterable", "People))) If `make_subquery` is False, `thesefy` injects an ``__iter__()`` that simply returns the", "None): return target class new_meta(type(target)): if make_subquery: def __iter__(self): return (x for x", "simply returns the same object and a ``next()`` method that immediately stops the", "visit_BitAnd(self, node): self.stack.append(' & ') def visit_BitXor(self, node): self.stack.append(' ^ ') def visit_FloorDiv(self,", "self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self,", "') def visit_BitOr(self, node): self.stack.append(' | ') def visit_BitAnd(self, node): self.stack.append(' & ')", "matter but I'm picky for k, v in reversed(zip(node.keys, node.values)): self.visit(v) self.visit(k) dictbody", "stack.pop() def visit_And(self, node): self.stack.append(' and ') def visit_Or(self, node): self.stack.append(' or ')", "exprs = [] for _ in range(len(node.values)): exprs.insert(0, self.stack.pop(-1)) op = self.stack.pop(-1) self.stack.append('(%s)'", "% node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise NotImplementedError def visit_Attribute(self, node):", "if starargs: if call: call += ', ' call += '*%s' % starargs", "(who for who in People) If `target` already support the iterable protocol (i.e", "dictbody = ', '.join( '%s: %s' % (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keys))", "that in order to use `make_subquery` you call `thesefy`:func: as a 
decorator-returning function::", "query language core. ''' import ast import types from xoutil.symbols import Unset from", "visit_IfExp(self, node): self.visit(node.orelse) self.visit(node.test) self.visit(node.body) body = self.stack.pop(-1) test = self.stack.pop(-1) orelse =", "visit_Mult(self, node): self.stack.append(' * ') def visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self,", "name, only_globals=False): if not only_globals: res = self._frame.f_locals.get(name, Unset) else: res = Unset", "a TypeError. ''' from types import GeneratorType if isinstance(which, GeneratorType): return get_query_object(which, **kwargs)", "% self.stack.pop(-1)) def visit_SetComp(self, node): self._visit_comp(node) self.stack.append('{%s}' % self.stack.pop(-1)) def visit_DictComp(self, node): self.visit(node.value)", "_ in range(len(node.args))] keywords = [ (self.stack.pop(-1), self.stack.pop(-1)) for _ in range(len(node.keywords)) ]", "NameError(name) @memoized_property def locals(self): return self._frame.f_locals @memoized_property def globals(self): return self._frame.f_globals @memoized_property def", "in range(len(node.elts)) ) ) def visit_Tuple(self, node): for elt in reversed(node.elts): self.visit(elt) result", "QueryObjectType = import_object(query_type) FrameType = import_object(frame_type or QueryObjectType.frame_type) return QueryObjectType( uncompiled.qst, FrameType(gi_frame.f_locals, gi_frame.f_globals),", "same object and a ``next()`` method that immediately stops the iteration. 
Notice that", "self._visit_comp(node) self.stack.append('(%s)' % self.stack.pop(-1)) def _visit_comp(self, node): self.visit(node.elt) pop = lambda: self.stack.pop(-1) lines", "keyword argument') self.expression = kwargs.pop('expression', None) for attr, val in kwargs.items(): setattr(self, attr,", "this = Universe() RESERVED_ARGUMENTS = ( 'limit', 'offset', 'groups', 'order', 'get_value', 'qst', '_frame'", "a query expression (more precisely a generator object) it is passed to `get_query_object`:func:", "test, orelse)) def visit_Lambda(self, node): raise NotImplementedError() def visit_Dict(self, node): # order does", "def visit_Str(self, node): self.stack.append('%r' % node.s) visit_Bytes = visit_Str def visit_Repr(self, node): raise", "isinstance(which, GeneratorType): return get_query_object(which, **kwargs) else: if not isinstance(which, interfaces.QueryObject): raise TypeError('Query object", "types.FunctionType) if obj.__closure__: return { name: cell.cell_contents for name, cell in zip(obj.__code__.co_freevars, obj.__closure__)", "all keyword arguments. 
If `which` is not a query expression it must be", "self.stack.append(' <= ') def visit_Gt(self, node): self.stack.append(' > ') def visit_GtE(self, node): self.stack.append('", "coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) <NAME> [~º/~] and Contributors #", "isinstance(v, types.GeneratorType) and v.gi_code.co_name == '<genexpr>': return get_query_object(v) else: return v class SourceBuilder(ast.NodeVisitor):", "self.stack self.visit(node.op) self.visit(node.right) self.visit(node.left) left = stack.pop(-1) right = stack.pop(-1) op = stack.pop(-1)", "= ['%s: %s' % (pop(), pop())] self._visit_generators(node) lines.append(pop()) self.stack.append('{%s}' % ' '.join(lines)) def", "if %s else %s)' % (body, test, orelse)) def visit_Lambda(self, node): raise NotImplementedError()", "if node.starargs else '' kwargs = self.stack.pop(-1) if node.kwargs else '' call =", "visit_Div(self, node): self.stack.append(' / ') def visit_Mod(self, node): self.stack.append(' % ') def visit_Pow(self," ]
[ "c3.append(desc) else: c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description:", "> tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3", "self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if", "= {} childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) >", "li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0])", "{len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]):", "self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst", "> table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno})", "0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = [] description =", "title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] = {} # yy", "c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno", "tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')", "{title}') c2 = c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) >", "Larz60+ import ScraperPaths import GetPage import CIA_ScanTools from lxml import html from lxml.cssselect", "print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element", "nitem = 
'' parts = item.split('\\n') for part in parts: nitem = f'{nitem.strip()}", "CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl,", "{} childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)')", "def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno", "> li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if", "= self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1 if __name__ == '__main__':", "tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3 =", "ScraperPaths import GetPage import CIA_ScanTools from lxml import html from lxml.cssselect import CSSSelector", "import ScraperPaths import GetPage import CIA_ScanTools from lxml import html from lxml.cssselect import", "len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = [] #", "yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)')", "import re import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths()", "self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl", "desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip())", "lxml import etree from lxml.etree 
import XPath import re import os import sys", "from lxml.etree import XPath import re import os import sys class CIA_InternationalOrgnizationsAndGroups: def", "self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1 if __name__ == '__main__': CIA_InternationalOrgnizationsAndGroups()", "> tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy:", "CIA_ScanTools from lxml import html from lxml.cssselect import CSSSelector from lxml import etree", "self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item or '\\n' in", "XPath import re import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath =", "if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = []", "self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename)", "= self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] = {} # yy =", "item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {}", "# yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) >", "yy: {len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element in", "self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl)", "yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > 
table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1)", "self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item", "__init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename", "item or '\\n' in item: nitem = '' parts = item.split('\\n') for part", "html from lxml.cssselect import CSSSelector from lxml import etree from lxml.etree import XPath", "> td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description']", "c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B", "etree from lxml.etree import XPath import re import os import sys class CIA_InternationalOrgnizationsAndGroups:", "'\\n' in item: nitem = '' parts = item.split('\\n') for part in parts:", "tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1", "[] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n % 2 ==", "else: return item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups']", "from lxml.cssselect import CSSSelector from lxml import etree from lxml.etree import XPath import", "self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename", "== 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = [] description", "scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = 
self.fact_links['InternationalOrginizationsAndGroups'] = {} childno =", "for part in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return item", "self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in", "part in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return item def", "html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while True: xx =", "xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) == 0:", "print(f'Title: {title}') c2 = c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2)", "> div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) >", "= CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage =", "self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno})", "= c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno +=", "or '\\n' in item: nitem = '' parts = item.split('\\n') for part in", "else: c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}')", "table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) >", "= [] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n % 2", "self.fact_links['InternationalOrginizationsAndGroups'] = {} 
childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno})", "= f'{nitem.strip()} {part.strip()}' return nitem else: return item def scrape_text(self): tree = html.fromstring(self.mainpage)", "return item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] =", "class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page", "tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) >", "> li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break title =", "ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools()", "div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B >", "span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title:", "# print(xx[0].text) if len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}')", "import CSSSelector from lxml import etree from lxml.etree import XPath import re import", "from lxml import etree from lxml.etree import XPath import re import os import", "copyright (c) 2018 Larz60+ import ScraperPaths import GetPage import CIA_ScanTools from lxml import", "> table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: #", "def remove_fluff(self, item): if '\\r\\n' in item or '\\n' in item: nitem =", "= ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst =", "c2['Description'] = [] 
description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1", "import etree from lxml.etree import XPath import re import os import sys class", "> tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1)", "self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item or '\\n' in item:", "print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n % 2 == 0: desc", "import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp =", "enumerate(yy[0]): if n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3", "html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while True:", "[] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1 if __name__", "2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = []", "lxml import html from lxml.cssselect import CSSSelector from lxml import etree from lxml.etree", "self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item or '\\n' in item: nitem", "item): if '\\r\\n' in item or '\\n' in item: nitem = '' parts", "description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1 if __name__ ==", "if n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 =", "import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage", "= GetPage.GetPage() self.getpage = 
self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links =", "import XPath import re import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath", "li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip())", "lxml.cssselect import CSSSelector from lxml import etree from lxml.etree import XPath import re", "td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] =", "= [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) # print(f'Description: {description}') childno += 1 if", "tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) >", "= tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy", "div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1:", "import CIA_ScanTools from lxml import html from lxml.cssselect import CSSSelector from lxml import", "import html from lxml.cssselect import CSSSelector from lxml import etree from lxml.etree import", "CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename", "# html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while True: xx", "tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy =", "c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n %", "= c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > 
table:nth-child(1) >", "= self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n'", "= html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 = self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while", "element in enumerate(yy[0]): if n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc)", "= c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n", "GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links", "0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] = {}", "> td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1)", "self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links()", "return nitem else: return item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1", "= tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) >", "re import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp", "remove_fluff(self, item): if '\\r\\n' in item or '\\n' in item: nitem = ''", "'\\r\\n' in item or '\\n' in item: nitem = '' parts = item.split('\\n')", "c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > 
tbody:nth-child(1)", "# copyright (c) 2018 Larz60+ import ScraperPaths import GetPage import CIA_ScanTools from lxml", "from lxml import html from lxml.cssselect import CSSSelector from lxml import etree from", "(c) 2018 Larz60+ import ScraperPaths import GetPage import CIA_ScanTools from lxml import html", "2018 Larz60+ import ScraperPaths import GetPage import CIA_ScanTools from lxml import html from", "> tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2)", "while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx)", "childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') #", "{part.strip()}' return nitem else: return item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree)", "self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html'", "in enumerate(yy[0]): if n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else:", "self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage", "item.split('\\n') for part in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return", "tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break title", "tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length yy: {len(yy[0])}')", 
"'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item):", "n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description']", "= {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) >", "== 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] =", "CSSSelector from lxml import etree from lxml.etree import XPath import re import os", "len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title]", "= '' parts = item.split('\\n') for part in parts: nitem = f'{nitem.strip()} {part.strip()}'", "table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') if len(yy[0]) > 1: # print(f'\\n...length", "= item.split('\\n') for part in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else:", "self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links", "print(xx[0].text) if len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2", "GetPage import CIA_ScanTools from lxml import html from lxml.cssselect import CSSSelector from lxml", "% 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] =", "break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 = c1[title] = {} #", "> 1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}')", "> span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break title = 
self.remove_fluff(xx[0].text.strip()) #", "self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def", "if '\\r\\n' in item or '\\n' in item: nitem = '' parts =", "= self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description)", "> div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B", "in item: nitem = '' parts = item.split('\\n') for part in parts: nitem", "= 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self,", "parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return item def scrape_text(self): tree", "{} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1)", "td:nth-child(1)') yy = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > div:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) >", "parts = item.split('\\n') for part in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem", "c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if", "# print(f'{html.tostring(yy[0])}') for n, element in enumerate(yy[0]): if n % 2 == 0:", "nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return item def scrape_text(self): tree =", "= tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) == 0: break", "for n, element 
in enumerate(yy[0]): if n % 2 == 0: desc =", "= self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename =", "True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if len(xx) ==", "# print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for n,", "if len(xx) == 0: break title = self.remove_fluff(xx[0].text.strip()) # print(f'Title: {title}') c2 =", "1: # print(f'\\n...length yy: {len(yy[0])}') c3 = c2['Description'] = [] # print(f'{html.tostring(yy[0])}') for", "self.cst.fluffinutter(html.tostring(element).decode('utf-8')) c3.append(desc) else: c3 = c2['Description'] = [] description = self.remove_fluff(yy[0].text.strip()) c3.append(description) #", "item: nitem = '' parts = item.split('\\n') for part in parts: nitem =", "nitem else: return item def scrape_text(self): tree = html.fromstring(self.mainpage) # html.open_in_browser(tree) c1 =", "1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text) if", "'' parts = item.split('\\n') for part in parts: nitem = f'{nitem.strip()} {part.strip()}' return", "= self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item or", "self.getpage(self.mainurl, self.filename) self.scrape_text() self.cst.save_fact_links() def remove_fluff(self, item): if '\\r\\n' in item or '\\n'", "in parts: nitem = f'{nitem.strip()} {part.strip()}' return nitem else: return item def scrape_text(self):", "= self.cst.fact_links self.mainurl = 'https://www.cia.gov/library/publications/resources/the-world-factbook/appendix/appendix-b.html' self.filename = self.get_filename(self.mainurl) self.mainpage = self.getpage(self.mainurl, self.filename) 
self.scrape_text()", "= 1 while True: xx = tree.cssselect(f'#GetAppendix_B > li:nth-child({childno}) > span:nth-child(1)') # print(xx[0].text)", "sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage =", "lxml.etree import XPath import re import os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self):", "= self.fact_links['InternationalOrginizationsAndGroups'] = {} childno = 1 while True: xx = tree.cssselect(f'#GetAppendix_B >", "in item or '\\n' in item: nitem = '' parts = item.split('\\n') for", "def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage() self.getpage = self.gp.get_page self.get_filename =", "os import sys class CIA_InternationalOrgnizationsAndGroups: def __init__(self): self.spath = ScraperPaths.ScraperPaths() self.gp = GetPage.GetPage()", "= self.gp.get_page self.get_filename = self.gp.get_filename self.cst = CIA_ScanTools.CIA_Scan_Tools() self.fact_links = self.cst.fact_links self.mainurl =", "f'{nitem.strip()} {part.strip()}' return nitem else: return item def scrape_text(self): tree = html.fromstring(self.mainpage) #", "c2 = c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) > div:nth-child(2) > table:nth-child(1)", "n, element in enumerate(yy[0]): if n % 2 == 0: desc = self.cst.fluffinutter(html.tostring(element).decode('utf-8'))", "# print(f'Title: {title}') c2 = c1[title] = {} # yy = tree.cssselect(f'li.ln-a:nth-child({childno}) >", "import GetPage import CIA_ScanTools from lxml import html from lxml.cssselect import CSSSelector from" ]
[ "import wg4script, AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b = 0,", "'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, }", "否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai':", "'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False,", "next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base,", "'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag':", "'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0,", "False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate':", "1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue':", "= { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA,", "AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays':", "how to select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions':", "net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' 
# [rule-base, stra] how to get", "'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True", "True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum':", "'rule-base' # [rule-base, stra] how to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE,", "(strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu':", "0, 0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai':", "'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility':", "stratree of nodes, how to select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet,", "'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache':", "actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra'", "'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra' }, },", "of stratree of nodes, how to select next path, [rule-base, random, net] 'type_stranet':", "path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra]", "'__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility = 50, 1", "{'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, 
} dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD',", "AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet,", "nodes, how to select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas,", "'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays,", "AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra' }, }, } wg4script.simulateframe(dic_mainparas=", "wg4script, AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0,", "'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random',", "select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' #", "False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base',", "to select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base'", "type of stratree of nodes, how to select next path, [rule-base, random, net]", "= 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet,", "flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id':", "0, 'cuda_id': 0, 'flag_savestate': False, # 
flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue':", "how to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet,", "{'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show':", "'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how to get candidate", "# flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0,", "candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions':", "strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False,", "to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas':", "# 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas':", "}, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd,", "wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas =", "'rule-base', # type of stratree of nodes, how to select next path, [rule-base,", "'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate:", "get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 
'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas,", "'flag_candidateactions': 'rule-base' # [rule-base, stra] how to get candidate actions }, 'red': {'type_ai':", "'type_stra': 'rule-base', # type of stratree of nodes, how to select next path,", "'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False,", "True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据,", "0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA,", "# type of stratree of nodes, how to select next path, [rule-base, random,", "random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how to", "0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra':", "{'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra' }, }, }", "'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, },", "strategy_id_b = 0, 0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = {", "'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree", "<gh_stars>1-10 # coding:utf-8 import sys sys.path.append('../../pythonModules') import wg4script, AI_QD, wgdensestranet if __name__ ==", "'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列", "= {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 
'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b),", "of nodes, how to select next path, [rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas':", "} dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids':", "'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0,", "# flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', #", "dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r,", "num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base',", "# [rule-base, stra] how to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra':", "sys.path.append('../../pythonModules') import wg4script, AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b =", "'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True,", "stra] how to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet':", "'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai':", "= 0, 0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red':", "'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True,", "# coding:utf-8 import sys sys.path.append('../../pythonModules') import 
wg4script, AI_QD, wgdensestranet if __name__ == '__main__':", "{'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of nodes, how to select", "'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how to get candidate actions },", "{ 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra':", "AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0", "}, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra' },", "dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai':", "{'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet':", "'type_stra': 'net', 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'stra' }, }, } wg4script.simulateframe(dic_mainparas= dic_mainparas)", "wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility, 'num_xd':", "'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False,", "两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': {", "== '__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility = 50,", "num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas", 
"'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm':", "'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../', 'str_global_flag': 'QD', 'num_plays': num_plays, 'num_objcutility': num_objcutility,", "'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of nodes, how to", "[rule-base, random, net] 'type_stranet': wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how", "import sys sys.path.append('../../pythonModules') import wg4script, AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r,", "0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of nodes, how", "'type_stranet': wgdensestranet.StraDenseNet, }, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas", "False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False;", "50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet': wgdensestranet.StraDenseNet, },", "0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE,", "dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how to get candidate actions }, 'red':", "if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility", "num_plays, 'num_objcutility': num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm':", "True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True,", "wgdensestranet if __name__ == 
'__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays,", "num_objcutility = 50, 1 dic2_rolloutaiparas = { 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'rule-base', 'type_stranet':", "'flag_savestate': False, # flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra':", "{ 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of", "}, 'blue': {'type_ai': AI_QD.AI_QD_STRA, 'type_stra': 'random', 'type_stranet': wgdensestranet.StraDenseNet, }, } dic_mainparas = {'str_wgrootdir':'../../',", "保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of", "num_objcutility, 'num_xd': num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, #", "flag_savestate: 保存MCTS生成的数据, 'dic2_aiparas': { 'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type", "False, 'flag_afm': True, # 两个AI都为BASE时,flag_afm=False; 否则为True 'flag_dllnum': 0, 'cuda_id': 0, 'flag_savestate': False, #", "'flag_color4acai': 0, 'blue': {'type_ai': AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of nodes,", "[rule-base, stra] how to get candidate actions }, 'red': {'type_ai': AI_QD.AI_QD_BASE, 'type_stra': 'net',", "wgdensestranet.StraDenseNet, 'dic2_rolloutaiparas': dic2_rolloutaiparas, 'flag_candidateactions': 'rule-base' # [rule-base, stra] how to get candidate actions", "sys sys.path.append('../../pythonModules') import wg4script, AI_QD, wgdensestranet if __name__ == '__main__': num_xd, strategy_id_r, strategy_id_b", "AI_QD.AI_QD_HA, 'type_stra': 'rule-base', # type of stratree of nodes, how to select next", "strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility = 50, 1 dic2_rolloutaiparas =", "coding:utf-8 import sys sys.path.append('../../pythonModules') import wg4script, AI_QD, 
wgdensestranet if __name__ == '__main__': num_xd,", "__name__ == '__main__': num_xd, strategy_id_r, strategy_id_b = 0, 0, 0 num_plays, num_objcutility =", "num_xd, 'strategy_ids': (strategy_id_r, strategy_id_b), 'flag_show': True, 'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache':", "'flag_action_cache': False, 'flag_qd_rm': True, # flag_qd_rm保存数据库roomrd动作序列 'flag_cache': False, 'flag_gpu': False, 'flag_afm': True, #" ]
[ "if not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if", "res def helper(self,nums,res,path): if not nums: res.append(path) return res for i in range(len(nums)):", "return [[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums: res.append(path)", "not nums: res.append(path) return res for i in range(len(nums)): cur = nums[i] left=nums[:i]+nums[i+1:]", "self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums: res.append(path) return res for i", "nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums:", "def permute(self, nums: List[int]) -> List[List[int]]: if not nums: return [[]] path=[] res=[]", "Solution: def permute(self, nums: List[int]) -> List[List[int]]: if not nums: return [[]] path=[]", "res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums: res.append(path) return res for", "[[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums: res.append(path) return", "def helper(self,nums,res,path): if not nums: res.append(path) return res for i in range(len(nums)): cur", "<filename>permutations/permutations.py<gh_stars>0 class Solution: def permute(self, nums: List[int]) -> List[List[int]]: if not nums: return", "-> List[List[int]]: if not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res def", "nums: List[int]) -> List[List[int]]: if not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return", "not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not", "path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path): if not nums: res.append(path) return res", "return res def helper(self,nums,res,path): if not nums: res.append(path) return res for i in", "List[int]) -> List[List[int]]: 
if not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res", "if not nums: res.append(path) return res for i in range(len(nums)): cur = nums[i]", "helper(self,nums,res,path): if not nums: res.append(path) return res for i in range(len(nums)): cur =", "nums: res.append(path) return res for i in range(len(nums)): cur = nums[i] left=nums[:i]+nums[i+1:] self.helper(left,res,path+[cur])", "class Solution: def permute(self, nums: List[int]) -> List[List[int]]: if not nums: return [[]]", "permute(self, nums: List[int]) -> List[List[int]]: if not nums: return [[]] path=[] res=[] self.helper(nums,res,path)", "List[List[int]]: if not nums: return [[]] path=[] res=[] self.helper(nums,res,path) return res def helper(self,nums,res,path):" ]
[ "from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics = {", "# Copyright 2017 Mirantis Inc. # # Licensed under the Apache License, Version", "Unless required by applicable law or agreed to in writing, software # distributed", "}, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties':", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'},", "the License. You may obtain # a copy of the License at #", "['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes',", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "= { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'state': { 'type':", "copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics =", "'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required':", "}, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details': { 'type': 'array',", "['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } }", "'utilisation'] } }, 'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': {", "'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': 
['string',", "'response_body': { 'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum': [ 'pending',", "with the License. You may obtain # a copy of the License at", "'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']},", "['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False,", "'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop',", "{'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests',", "'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']},", "False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate']", "'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties':", "'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets':", "use this file except in compliance with the License. You may obtain #", "under the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers", "{'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type':", "'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime':", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation']", "implied. See the # License for the specific language governing permissions and limitations", "['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } },", "show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'state': {", "'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details', 'disk_details'], } } get_server = copy.deepcopy(servers247.get_server)", "False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details',", "['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False,", "'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details': { 'type': 'array', 'items':", "you may # not use this file except in compliance with the License.", "[ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum':", "['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 
'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics':", "KIND, either express or implied. See the # License for the specific language", "['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'},", "'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum': [ 'libvirt',", "{ 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required':", "file except in compliance with the License. You may obtain # a copy", "[ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type':", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets':", "'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} },", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer',", "{'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop':", "{'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type':", "'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor',", "{ 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type':", "'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 
'additionalProperties': False, 'required': ['mac_address',", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]},", "'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details':", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address,", "the # License for the specific language governing permissions and limitations # under", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] },", "'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] }", "'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets',", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type':", "'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer',", "permissions and limitations # under the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types", "'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items': { 'type':", "} }, 'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'read_bytes':", "required by applicable law or agreed to in writing, software # distributed under", "{'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details':", "['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer',", "'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array',", "'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer',", "applicable law or agreed to in writing, software # distributed under the License", "'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details':", "{'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type':", "'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum':", "in compliance with the License. 
You may obtain # a copy of the", "or agreed to in writing, software # distributed under the License is distributed", "{'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type':", "parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics = { 'status_code': [200], 'response_body':", "'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': {", "'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time':", "'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer',", "{ 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']},", "'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors':", "{'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate',", "'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer',", "False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required':", "'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details', 'disk_details'], }", "'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum': [", "'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type':", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, 
software # distributed under the License is distributed on an \"AS IS\"", "Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "governing permissions and limitations # under the License. import copy from tempest.lib.api_schema.response.compute.v2_1 import", "tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type':", "['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array',", "'array', 'items': { 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type':", "'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi',", "{'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks':", "'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': {", "[parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type':", "'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors',", "'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': {", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "{ 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']},", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "{ 'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended']", "2.0 (the \"License\"); 
you may # not use this file except in compliance", "'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate':", "'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items': { 'type':", "'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items':", "'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type':", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "2017 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the", "['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors',", "License, Version 2.0 (the \"License\"); you may # not use this file except", "the specific language governing permissions and limitations # under the License. import copy", "'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id',", "{'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type':", "{'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'},", "'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused',", "'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']},", "and limitations # under the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from", "{'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets',", "{ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type':", "'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop':", "['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type':", "'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string',", "agreed to in writing, software # distributed under the License is distributed on", "'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items': { 'type': 'object',", "} }, 'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address':", "'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required': [", "['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer',", "'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate':", "['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False,", "'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type':", "limitations # under the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47", "{ 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver':", "'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required':", "# Unless required by applicable law or agreed to in writing, software #", "{'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used':", "'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details', 'disk_details'], } } get_server =", "by applicable law or agreed to in writing, software # distributed under the", "'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type':", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details': { 'type': 'array', 'items': {", "'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer',", "'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']},", "'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': {", "'items': { 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer',", "'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties':", "'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 
'additionalProperties': False, 'required': ['maximum',", "}, 'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] },", "'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details', 'disk_details'], } }", "'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties': False,", "tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics = { 'status_code':", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "'properties': { 'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown', 'crashed',", "except in compliance with the License. You may obtain # a copy of", "'errors_count'] } } }, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime',", "to in writing, software # distributed under the License is distributed on an", "{ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']},", "{ 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'state': { 'type': 'string',", "'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']}", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "# not use this file except in compliance with the License. 
You may", "'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests',", "'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type':", "# License for the specific language governing permissions and limitations # under the", "'type': 'array', 'items': { 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests':", "}, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': {'type':", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False,", "'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']},", "Version 2.0 (the \"License\"); you may # not use this file except in", "'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties':", "'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes',", "[ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details',", "\"License\"); you may # not use this file except in compliance with the", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics 
= { 'status_code': [200], 'response_body': {", "'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'},", "'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os':", "'object', 'properties': { 'state': { 'type': 'string', 'enum': [ 'pending', 'running', 'paused', 'shutdown',", "language governing permissions and limitations # under the License. import copy from tempest.lib.api_schema.response.compute.v2_1", "'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer',", "not use this file except in compliance with the License. You may obtain", "'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors':", "'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os',", "'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus': {'type':", "License for the specific language governing permissions and limitations # under the License.", "} } }, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive',", "{'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties':", "'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items': {", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the #", "'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']},", "'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} },", "'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes':", "'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests':", "Copyright 2017 Mirantis Inc. # # Licensed under the Apache License, Version 2.0", "{'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type':", "'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count']", "OF ANY KIND, either express or implied. See the # License for the", "{'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type':", "'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets',", "'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 'nic_details', 'disk_details'],", "'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details': { 'type':", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "(the \"License\"); you may # not use this file except in compliance with", "# # Unless required by applicable law or agreed to in writing, software", "}, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']},", "'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } },", "'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']}", "'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type':", "License. You may obtain # a copy of the License at # #", "'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type':", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] }", "{'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type':", "{ 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer',", "'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']},", "ANY KIND, either express or implied. See the # License for the specific", "{'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type':", "} }, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus',", "the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as", "as servers247 show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': {", "['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer',", "['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer',", "'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']},", "{'type': 'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer',", "'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']},", "'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required':", "'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']},", "'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets':", "'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type': 'boolean'}, 'num_cpus':", "{'type': 'boolean'}, 'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': {", "['id', 'time', 'utilisation'] } }, 'nic_details': { 'type': 'array', 'items': { 'type': 'object',", "'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks',", 
"under the Apache License, Version 2.0 (the \"License\"); you may # not use", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details',", "{'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used']", "}, 'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'mac_address': {'oneOf':", "{ 'type': 'array', 'items': { 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer', 'null']},", "import servers as servers247 show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type': 'object',", "}, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop',", "See the # License for the specific language governing permissions and limitations #", "'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type':", "{'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer', 'null']} }, 'additionalProperties':", "law or agreed to in writing, software # distributed under the License is", "express or implied. See the # License for the specific language governing permissions", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. 
See the # License for", "[200], 'response_body': { 'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum': [", "'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']},", "'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required': [ 'state', 'driver',", "['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time', 'utilisation'] } }, 'nic_details': {", "{ 'type': 'object', 'properties': { 'mac_address': {'oneOf': [parameter_types.mac_address, {'type': 'null'}]}, 'rx_octets': {'type': ['integer',", "'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor':", "for the specific language governing permissions and limitations # under the License. import", "servers247 show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'state':", "['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } }, 'additionalProperties': False, 'required': [ 'state',", "{ 'read_bytes': {'type': ['integer', 'null']}, 'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']},", "'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic',", "'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation':", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "{'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties':", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics', 'num_disks', 'memory_details', 'cpu_details', 
'nic_details', 'disk_details'], } } get_server", "'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details':", "compliance with the License. You may obtain # a copy of the License", "'read_requests': {'type': ['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count':", "}, 'additionalProperties': False, 'required': [ 'state', 'driver', 'hypervisor', 'hypervisor_os', 'uptime', 'config_drive', 'num_cpus', 'num_nics',", "'status_code': [200], 'response_body': { 'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum':", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object',", "# under the License. 
import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import", "import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics = { 'status_code': [200],", "{'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type':", "{ 'type': 'object', 'properties': { 'state': { 'type': 'string', 'enum': [ 'pending', 'running',", "['integer', 'null']}, 'write_bytes': {'type': ['integer', 'null']}, 'write_requests': {'type': ['integer', 'null']}, 'errors_count': {'type': ['integer',", "servers as servers247 show_server_diagnostics = { 'status_code': [200], 'response_body': { 'type': 'object', 'properties':", "{'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type':", "'used'] }, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id':", "'suspended'] }, 'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv']", "'paused', 'shutdown', 'crashed', 'suspended'] }, 'driver': { 'type': 'string', 'enum': [ 'libvirt', 'xenapi',", "'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type':", "['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer',", "['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer',", "may # not use this file except in compliance with the License. 
You", "{ 'id': {'type': ['integer', 'null']}, 'time': {'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']}", "'num_cpus': {'type': 'integer'}, 'num_nics': {'type': 'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object',", "{'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] }", "either express or implied. See the # License for the specific language governing", "{'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} }, 'additionalProperties':", "this file except in compliance with the License. You may obtain # a", "or implied. See the # License for the specific language governing permissions and", "'null'}]}, 'rx_octets': {'type': ['integer', 'null']}, 'rx_errors': {'type': ['integer', 'null']}, 'rx_drop': {'type': ['integer', 'null']},", "'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']} },", "'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer', 'null']}, 'tx_packets':", "['integer', 'null']}, 'used': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] },", "'time', 'utilisation'] } }, 'nic_details': { 'type': 'array', 'items': { 'type': 'object', 'properties':", "['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']}, 'tx_errors': {'type': ['integer', 'null']}, 'tx_drop': {'type': ['integer',", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "'null']} }, 'additionalProperties': False, 'required': ['read_bytes', 'read_requests', 'write_bytes', 'write_requests', 'errors_count'] } } },", "{'type': ['integer', 'null']}, 'utilisation': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['id', 'time',", "'hypervisor': 
{'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive':", "}, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items': {", "'array', 'items': { 'type': 'object', 'properties': { 'id': {'type': ['integer', 'null']}, 'time': {'type':", "'rx_rate', 'tx_octets', 'tx_errors', 'tx_drop', 'tx_packets', 'tx_rate'] } }, 'disk_details': { 'type': 'array', 'items':", "OR CONDITIONS OF ANY KIND, either express or implied. See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "specific language governing permissions and limitations # under the License. import copy from", "['integer', 'null']} }, 'additionalProperties': False, 'required': ['mac_address', 'rx_octets', 'rx_errors', 'rx_drop', 'rx_packets', 'rx_rate', 'tx_octets',", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "License. import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247", "'used': {'type': ['integer', 'null']} }, 'additionalProperties': False, 'required': ['maximum', 'used'] }, 'cpu_details': {", "'required': ['id', 'time', 'utilisation'] } }, 'nic_details': { 'type': 'array', 'items': { 'type':", "import copy from tempest.lib.api_schema.response.compute.v2_1 import parameter_types from tempest.lib.api_schema.response.compute.v2_47 import servers as servers247 show_server_diagnostics", "{'type': ['string', 'null']}, 'hypervisor_os': {'type': ['string', 'null']}, 'uptime': {'type': ['integer', 'null']}, 'config_drive': {'type':", "'null']}, 'rx_packets': {'type': ['integer', 'null']}, 'rx_rate': {'type': ['integer', 'null']}, 'tx_octets': {'type': ['integer', 'null']},", "}, 'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'read_bytes': {'type':", 
"['maximum', 'used'] }, 'cpu_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': {", "'tx_drop': {'type': ['integer', 'null']}, 'tx_packets': {'type': ['integer', 'null']}, 'tx_rate': {'type': ['integer', 'null']} },", "'disk_details': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'read_bytes': {'type': ['integer',", "'integer'}, 'num_disks': {'type': 'integer'}, 'memory_details': { 'type': 'object', 'properties': { 'maximum': {'type': ['integer',", "'type': 'string', 'enum': [ 'libvirt', 'xenapi', 'vmwareapi', 'ironic', 'hyperv'] }, 'hypervisor': {'type': ['string',", "{ 'type': 'object', 'properties': { 'maximum': {'type': ['integer', 'null']}, 'used': {'type': ['integer', 'null']}" ]
[ "item x += 1 if x==num: break return func def series(condition=None, process=None, number=10):", "# Simple series generator with # multiple generators & decorators. # Author :", "cond = kwds['condition'] proc = kwds['process'] num = kwds['number'] x = 0 for", "if x==num: break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x", "process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1: yield x x", "decorators. # Author : <NAME> def myfunc(**kwds): def func(f): cond = kwds['condition'] proc", "series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1: yield x", "Simple series generator with # multiple generators & decorators. # Author : <NAME>", "with # multiple generators & decorators. # Author : <NAME> def myfunc(**kwds): def", "@myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1: yield x x += 1", "& decorators. 
# Author : <NAME> def myfunc(**kwds): def func(f): cond = kwds['condition']", "f(): if cond and cond(item): if proc: item = proc(item) yield item x", "x += 1 if x==num: break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number)", "number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1: yield x x +=", "0 for item in f(): if cond and cond(item): if proc: item =", "yield item x += 1 if x==num: break return func def series(condition=None, process=None,", "1 if x==num: break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper():", "in f(): if cond and cond(item): if proc: item = proc(item) yield item", "if cond and cond(item): if proc: item = proc(item) yield item x +=", "def func(f): cond = kwds['condition'] proc = kwds['process'] num = kwds['number'] x =", ": <NAME> def myfunc(**kwds): def func(f): cond = kwds['condition'] proc = kwds['process'] num", "kwds['condition'] proc = kwds['process'] num = kwds['number'] x = 0 for item in", "series generator with # multiple generators & decorators. # Author : <NAME> def", "# Author : <NAME> def myfunc(**kwds): def func(f): cond = kwds['condition'] proc =", "kwds['number'] x = 0 for item in f(): if cond and cond(item): if", "= proc(item) yield item x += 1 if x==num: break return func def", "def wrapper(): x = 1 while 1: yield x x += 1 return", "def myfunc(**kwds): def func(f): cond = kwds['condition'] proc = kwds['process'] num = kwds['number']", "Author : <NAME> def myfunc(**kwds): def func(f): cond = kwds['condition'] proc = kwds['process']", "kwds['process'] num = kwds['number'] x = 0 for item in f(): if cond", "proc(item) yield item x += 1 if x==num: break return func def series(condition=None,", "generators & decorators. 
# Author : <NAME> def myfunc(**kwds): def func(f): cond =", "cond(item): if proc: item = proc(item) yield item x += 1 if x==num:", "<NAME> def myfunc(**kwds): def func(f): cond = kwds['condition'] proc = kwds['process'] num =", "for item in f(): if cond and cond(item): if proc: item = proc(item)", "item in f(): if cond and cond(item): if proc: item = proc(item) yield", "return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while", "= kwds['condition'] proc = kwds['process'] num = kwds['number'] x = 0 for item", "<gh_stars>1000+ # Simple series generator with # multiple generators & decorators. # Author", "break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1", "proc = kwds['process'] num = kwds['number'] x = 0 for item in f():", "proc: item = proc(item) yield item x += 1 if x==num: break return", "wrapper(): x = 1 while 1: yield x x += 1 return wrapper", "if proc: item = proc(item) yield item x += 1 if x==num: break", "# multiple generators & decorators. # Author : <NAME> def myfunc(**kwds): def func(f):", "x==num: break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x =", "func(f): cond = kwds['condition'] proc = kwds['process'] num = kwds['number'] x = 0", "+= 1 if x==num: break return func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def", "def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1: yield", "= kwds['number'] x = 0 for item in f(): if cond and cond(item):", "generator with # multiple generators & decorators. 
# Author : <NAME> def myfunc(**kwds):", "x = 0 for item in f(): if cond and cond(item): if proc:", "cond and cond(item): if proc: item = proc(item) yield item x += 1", "= kwds['process'] num = kwds['number'] x = 0 for item in f(): if", "and cond(item): if proc: item = proc(item) yield item x += 1 if", "myfunc(**kwds): def func(f): cond = kwds['condition'] proc = kwds['process'] num = kwds['number'] x", "item = proc(item) yield item x += 1 if x==num: break return func", "multiple generators & decorators. # Author : <NAME> def myfunc(**kwds): def func(f): cond", "func def series(condition=None, process=None, number=10): @myfunc(condition=condition,process=process,number=number) def wrapper(): x = 1 while 1:", "= 0 for item in f(): if cond and cond(item): if proc: item", "num = kwds['number'] x = 0 for item in f(): if cond and" ]
[ "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations =", "'0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='oferty.OfertyUsers'), ), ]", "# Generated by Django 2.1.2 on 2018-12-03 10:19 from django.db import migrations, models", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField(", "= [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ]", "class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest',", "by Django 2.1.2 on 2018-12-03 10:19 from django.db import migrations, models import django.db.models.deletion", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [", "dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi', field=models.ForeignKey(null=True,", "('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='oferty.OfertyUsers'), ),", "2018-12-03 10:19 from django.db import migrations, models import django.db.models.deletion class 
Migration(migrations.Migration): dependencies =", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations", "on 2018-12-03 10:19 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "Django 2.1.2 on 2018-12-03 10:19 from django.db import migrations, models import django.db.models.deletion class", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty',", "10:19 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "Generated by Django 2.1.2 on 2018-12-03 10:19 from django.db import migrations, models import", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'),", "2.1.2 on 2018-12-03 10:19 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "[ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='oferty.OfertyUsers'),", "Migration(migrations.Migration): dependencies = [ ('oferty', '0011_ofertyest_kto_prowadzi'), ] operations = [ migrations.AlterField( model_name='ofertyest', name='kto_prowadzi'," ]
[ "counterJose <= CircJose: PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1 = 1", "+= 2 if PosVivo1 > counterJose: PosVivo1 = 1 counterJose += 1 if", "CircJose = int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav = int(input()) #var", "= 1 counterJose += 1 if PosVivo1 == PosJose: vivos += 1 while", "= 1 PosVivo2 = 1 while counterJose <= CircJose: PosVivo1 += 2 if", "CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2 > CounterFlav: PosVivo2 = 1", "PosVivo1 = 1 PosVivo2 = 1 while counterJose <= CircJose: PosVivo1 += 2", "CircJose: PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1 = 1 counterJose +=", "PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1 = 1 counterJose += 1", "#var counterJose = 1 CounterFlav = 1 vivos = 0 PosVivo1 = 1", "2 if PosVivo1 > counterJose: PosVivo1 = 1 counterJose += 1 if PosVivo1", "PosJose = int(input()) CircFlav = int(input()) PosFlav = int(input()) #var counterJose = 1", "<= CircJose: PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1 = 1 counterJose", "counterJose: PosVivo1 = 1 counterJose += 1 if PosVivo1 == PosJose: vivos +=", "1 while CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2 > CounterFlav: PosVivo2", "int(input()) #var counterJose = 1 CounterFlav = 1 vivos = 0 PosVivo1 =", "1 counterJose += 1 if PosVivo1 == PosJose: vivos += 1 while CounterFlav", "if PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav += 1 if PosVivo2 ==", "1 CounterFlav = 1 vivos = 0 PosVivo1 = 1 PosVivo2 = 1", "vivos += 1 while CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2 >", "int(input()) PosFlav = int(input()) #var counterJose = 1 CounterFlav = 1 vivos =", "PosVivo1 == PosJose: vivos += 1 while CounterFlav <= CircFlav: PosVivo2 += 2", "0 PosVivo1 = 1 PosVivo2 = 1 while counterJose <= CircJose: PosVivo1 +=", "+= 1 while CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2 > CounterFlav:", "2 if PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav += 1 if PosVivo2", "= 1 CounterFlav = 1 vivos = 0 PosVivo1 = 1 PosVivo2 =", "if PosVivo1 == 
PosJose: vivos += 1 while CounterFlav <= CircFlav: PosVivo2 +=", "int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav = int(input()) #var counterJose =", "= int(input()) PosFlav = int(input()) #var counterJose = 1 CounterFlav = 1 vivos", "#L02EX04 #inputs CircJose = int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav =", "if PosVivo1 > counterJose: PosVivo1 = 1 counterJose += 1 if PosVivo1 ==", "<filename>Listas de Python/Lista 2/L02EX04.py #L02EX04 #inputs CircJose = int(input()) PosJose = int(input()) CircFlav", "= int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav = int(input()) #var counterJose", "while counterJose <= CircJose: PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1 =", "int(input()) CircFlav = int(input()) PosFlav = int(input()) #var counterJose = 1 CounterFlav =", "<= CircFlav: PosVivo2 += 2 if PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav", "== PosJose: vivos += 1 while CounterFlav <= CircFlav: PosVivo2 += 2 if", "PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav += 1 if PosVivo2 == PosFlav:", "1 while counterJose <= CircJose: PosVivo1 += 2 if PosVivo1 > counterJose: PosVivo1", "#inputs CircJose = int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav = int(input())", "counterJose = 1 CounterFlav = 1 vivos = 0 PosVivo1 = 1 PosVivo2", "1 vivos = 0 PosVivo1 = 1 PosVivo2 = 1 while counterJose <=", "1 PosVivo2 = 1 while counterJose <= CircJose: PosVivo1 += 2 if PosVivo1", "PosVivo1 = 1 counterJose += 1 if PosVivo1 == PosJose: vivos += 1", "counterJose += 1 if PosVivo1 == PosJose: vivos += 1 while CounterFlav <=", "= 1 while counterJose <= CircJose: PosVivo1 += 2 if PosVivo1 > counterJose:", "de Python/Lista 2/L02EX04.py #L02EX04 #inputs CircJose = int(input()) PosJose = int(input()) CircFlav =", "+= 2 if PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav += 1 if", "1 if PosVivo1 == PosJose: vivos += 1 while CounterFlav <= CircFlav: PosVivo2", "CircFlav: PosVivo2 += 2 if PosVivo2 > 
CounterFlav: PosVivo2 = 1 CounterFlav +=", "= 1 CounterFlav += 1 if PosVivo2 == PosFlav: vivos += 1 print(vivos)", "PosJose: vivos += 1 while CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2", "PosVivo2 += 2 if PosVivo2 > CounterFlav: PosVivo2 = 1 CounterFlav += 1", "Python/Lista 2/L02EX04.py #L02EX04 #inputs CircJose = int(input()) PosJose = int(input()) CircFlav = int(input())", "PosFlav = int(input()) #var counterJose = 1 CounterFlav = 1 vivos = 0", "vivos = 0 PosVivo1 = 1 PosVivo2 = 1 while counterJose <= CircJose:", "2/L02EX04.py #L02EX04 #inputs CircJose = int(input()) PosJose = int(input()) CircFlav = int(input()) PosFlav", "PosVivo1 > counterJose: PosVivo1 = 1 counterJose += 1 if PosVivo1 == PosJose:", "PosVivo2 = 1 while counterJose <= CircJose: PosVivo1 += 2 if PosVivo1 >", "+= 1 if PosVivo1 == PosJose: vivos += 1 while CounterFlav <= CircFlav:", "while CounterFlav <= CircFlav: PosVivo2 += 2 if PosVivo2 > CounterFlav: PosVivo2 =", "= int(input()) #var counterJose = 1 CounterFlav = 1 vivos = 0 PosVivo1", "CounterFlav = 1 vivos = 0 PosVivo1 = 1 PosVivo2 = 1 while", "CircFlav = int(input()) PosFlav = int(input()) #var counterJose = 1 CounterFlav = 1", "= 1 vivos = 0 PosVivo1 = 1 PosVivo2 = 1 while counterJose", "> CounterFlav: PosVivo2 = 1 CounterFlav += 1 if PosVivo2 == PosFlav: vivos", "CounterFlav: PosVivo2 = 1 CounterFlav += 1 if PosVivo2 == PosFlav: vivos +=", "= int(input()) CircFlav = int(input()) PosFlav = int(input()) #var counterJose = 1 CounterFlav", "PosVivo2 = 1 CounterFlav += 1 if PosVivo2 == PosFlav: vivos += 1", "> counterJose: PosVivo1 = 1 counterJose += 1 if PosVivo1 == PosJose: vivos", "= 0 PosVivo1 = 1 PosVivo2 = 1 while counterJose <= CircJose: PosVivo1" ]
[ "return ([_int2date(d) for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b", "= observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if", "'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'],", "**kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper", "None, set to rank from SVD minus 1 p : int in [0,", "difference pct_change : bool whether to apply pct_change operator periods : int, default", "= dict() self.header_ = Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None): if", "1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0,", "---------- series_id : str or list of str Labels of series to retrieve", "= self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is", "keep label : str, default is None New label to rename returned series", "x : 2D array T observations/samples in rows, N variables/features in columns kmax", "{'diff': 0, 'log': 1}, 5: {'diff': 1, 'log': 1}, 6: {'diff': 2, 'log':", "io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url,", "= df.groupby('date').cumcount() df = df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\", "\"M\" step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of", "if r is None: return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if", "5: {'diff': 1, 'log': 1}, 6: {'diff': 2, 'log': 1}, 7: {'diff': 1,", "return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series 
data to an output", "Stock & Watson (1998) and Bai & Ng (2002) Parameters ---------- X :", "- 1) * 100 # pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr))", "return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod", "def keys(self): \"\"\"Return id names of all loaded series data\"\"\" return list(self.cache_.keys()) def", "name from vintage \"\"\" url = url or _fred_md_url if isinstance(vintage, int) and", "algorithm Author: <NAME> License: MIT \"\"\" import os import sys import json import", "echo=ECHO): \"\"\"Fill in missing data with factor model and EM algorithm of <NAME>", "= [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1] ic", "'log': 0}, 2: {'diff': 1, 'log': 0}, 3: {'diff': 2, 'log': 0}, 4:", "of observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in", "config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to", "to rename returned series release : pd.DateOffset or int (default is 0) maximum", "= 'Historical FRED-MD Vintages Final/' + csvfile_ else: csvfile_ = 'monthly/' + csvfile_", "1)]) mR2 = mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1)", "number of factors based on Bai & Ng (2002) criterion Parameters ---------- x", "Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P div yield',", "= df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns or keep] def __init__(self,", ": str or list of str ids of series to retrieve Returns -------", "M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y] = y[Y]", "except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** 
{series_id} ***\") def keys(self): \"\"\"Return", "= beta.T @ beta is covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,", "None start and end period dates (inclusive) to keep label : str, default", "of times to take difference pct_change : bool whether to apply pct_change operator", "// 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url,", "div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s", "# load via api if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except:", "is s**2/(T-1) y = pca.transform(x) # y = s * u: T x", "= pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0]", "in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column,", "record of a series\"\"\" if series_id not in self.header_: try: if series_id not", "= Change x(t) - x(t-1) # ch1 = Change from Year Ago x(t)", "df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current", "vintage)) & (df.index >= start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API", "url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else: df", "pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in", "States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500', \"S&P's Common Stock Price", "in np.flatnonzero(np.any(Y, axis=0)): # replace with column means 
X[Y[:, col], col] = np.nanmean(X[:,", "observations to keep diff, log, pct_change : int number of difference, log and", "= f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0]", "date YYYYMM if url is None: then derive subfolder name from vintage \"\"\"", "= {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P:", "or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all", "by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format", "-1) X = (X - mean) / std # standardize # \"M\" step:", "loaded \"\"\" df = DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity',", "*args, echo=None): if echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data", "/ C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign =", "the variable in the column Notes ----- See <NAME> Ng (2002) and McCracken", "= {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF':", "{series_id} ***\") def keys(self): \"\"\"Return id names of all loaded series data\"\"\" return", "category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if args else '') r =", "k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args", "t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df = 
df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None)", "@ u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax or len(s)) + 1)])", "R2 from adding kth (orthogonal) factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @", "Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() #", "access and data manipulation\"\"\" self.api_key = api_key self.start = start self.end = end", "if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]])) def _date2int(date):", "# undo standardization M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2", "0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series':", ": int number of difference, log and pct_change operations to apply freq :", "0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base", "r is None: return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name =", "(N * T)/(N + T) C2 = min(N, T) penalty = [np.log(NT2) /", "Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude", "to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id] s =", "echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for", "['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0]", "len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series data to an output file\"\"\"", "or len(s)) mR2 = [0] + list(s**2 / (N * T)) # first", "201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' +", "standardize=False, echo=ECHO): \"\"\"Determine number of 
factors based on Bai & Ng (2002) criterion", "is None base name of url, local file path or zipfile archive Returns", "return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id):", "Annual Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural Log", "df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] =", "Monthly # BW = Biweekly # W = Weekly # D = Daily", "True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1,", "'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log':", "kmax = min(len(s), kmax or len(s)) mR2 = [0] + list(s**2 / (N", "to retrieve Returns ------- n : int length of observations dataframe \"\"\" if", "str in {'M', 'Q', 'A'}, default is None set periodicity of dates log", "for FRED access and data manipulation\"\"\" self.api_key = api_key self.start = start self.end", "- x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1 =", "int, default is 0 number of times to take log diff : int,", "+= QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']:", "= (X * std) + mean # undo standardization M['kmax'] = r M['converge']", "series names, by page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data =", "{'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff':", "y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y]", "axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s))", "+ 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int) and vintage:", "= Seasonally Adjusted # NSA = Not Seasonally Adjusted # 
SAAR = Seasonally", "cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously", "[]) c['series'] = [] offset = 0 while True: s = self.category(category_id, api=\"category/series\",", "Watson (1998) and Bai & Ng (2002) Parameters ---------- X : 2D array", "rank from SVD minus 1 p : int in [0, 1, 2, 3],", "variance of residuals after k components lnvar = np.log(np.where(var > 0, var, 1e-26))", "df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in", "----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'}", "category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url =", "['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start']", "{'diff': 2, 'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff':", "return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names of all loaded", "to rank from SVD standardize : bool, default is False if True, then", "number of periods to lag for pct_change or diff operator annualize : int.", "open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear()", "T) C2 = min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2) / NT2,", "is vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y = s", "df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date df.index", "number of factors based on ICp{p} criterion, or 0 if not determined Notes", "contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred", "data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 +", "of residuals 
after k components lnvar = np.log(np.where(var > 0, var, 1e-26)) NT2", "as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key", "= 17760704 end = 99991231 echo_ = config.ECHO api_key = None def header(self,", "bool (default is True) resample and replace date index with month ends at", "and vintage: csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage < 201500:", "Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial", "Author: <NAME> License: MIT \"\"\" import os import sys import json import io", "series data to an output file\"\"\" with open(savefile or self.savefile, 'wb') as f:", "None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations of", "p else kmax or len(M['s'])-1 # \"E\" step: update missing entries y =", "Returns ------- Series or DataFrame transformed values, name set to label if provided", "T)/(N + T) C2 = min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2)", "label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations", "= to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse", "transformed values, name set to label if provided else series_id \"\"\" if (series_id", "+ csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage = csvfile_ else: vintage", ": bool, default is False if True, then standardize data before processing (works", "1) * 100 pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1)", "incrementally adding factors Parameters ---------- x : 2D array T observations/samples in rows,", ": str, default is None base name of url, local file path or", "residuals after k components lnvar = np.log(np.where(var > 0, var, 
1e-26)) NT2 =", "* 100 # cca = Continuously Compounded Annual Rate of Change ((ln(x(t)) -", "data before processing (works better) Returns ------- r : int best number of", "df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if", "self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def", "'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub,", "len(r) else 0 # first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal", "beta is covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s,", "list of str ids of series to retrieve Returns ------- n : int", "args if args else '') r = requests_get(url, echo=echo) return dict() if r", "+ 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015", "last meta record of a series\"\"\" if series_id not in self.header_: try: if", "df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start'])", "Returns ------- x : 2D arrayint X with nan's replaced by PCA EM", "of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t)) \"\"\"", "f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id,", "= requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or", "['HWI', 'Help Wanted Index for United States'], ['AMDMNO', 'New Orders for Durable Goods'],", "df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None): c", 
"s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self, category_id, api=\"category\", api_key=None,", "pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1 = Percent Change", "and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231,", "xml.etree.ElementTree as ET import matplotlib.pyplot as plt from pandas.api import types import time", "FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P", "df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id] s = multpl(v) elif series_id", ": int, default is None Latest realtime_start date (inclusive) allowed Returns ------- out:", "if echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data to memory", "input date YYYYMM if url is None: then derive subfolder name from vintage", "Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper", "factor shift : int, default is 0 number of rows to shift output", "self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names of all loaded series", "f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def", "methods in Bai & Ng (2002) to auto-determine number in every iteration Returns", "0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods':", "(2002) criterion Parameters ---------- x : 2D array T observations/samples in rows, N", "is covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT", "/ (N * T)) # first case is when no factors used var", "api=\"category\", api_key=api_key) 
if 'categories' not in c: return None c = c['categories'][0] c['children']", "s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2 =", "call release : pd.DateOffset or int (default is 0) maximum release number or", "mean # undo standardization M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) #", "Quarterly # M = Monthly # BW = Biweekly # W = Weekly", "lin = Levels (No transformation) [default] # chg = Change x(t) - x(t-1)", "0, 'log': 0}, 'log': {'diff': 0, 'log': 1}} header_ = { k :", "end = 99991231 echo_ = config.ECHO api_key = None def header(self, series_id, column='title'):", "csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage = csvfile_ else: vintage =", "to adding one factor component values are the incremental R2 for the variable", "mR2 = [0] + list(s**2 / (N * T)) # first case is", "echo=ECHO): \"\"\"Determine number of factors based on Bai & Ng (2002) criterion Parameters", "= Biweekly # W = Weekly # D = Daily # Seasonal Adjustment", "of all series loaded \"\"\" df = DataFrame() keep = ['id', 'observation_start', 'observation_end',", "FRED api string format\"\"\" return ([_int2date(d) for d in date] if types.is_list_like(date) else", "= (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def get_category(self,", "not in row[0]: # this row has metadata, e.g. 
transform codes label =", "(No transformation) [default] # chg = Change x(t) - x(t-1) # ch1 =", "or list of str Labels of series to retrieve start, end : int,", "np.isnan(X) # identify missing entries assert(not np.any(np.all(Y, axis=1))) # no row can be", "model and EM algorithm of <NAME> (1982), Stock & Watson (1998) and Bai", "1, 'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0,", "\"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x = beta.T", "errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest up to", "pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata", "= Annual # SA = Semiannual # Q = Quarterly # M =", "freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0)", "Parameters ---------- observations: DataFrame from FRED 'series/observations' api call release : pd.DateOffset or", "retrieve shiller series by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup =", "ends at selected freqs Returns ------- Series or DataFrame transformed values, name set", "Change ((x(t)/x(t-1)) - 1) * 100 pc1 = Percent Change from Year Ago", "start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series from alfred observations set Parameters", "for _, row in df.iloc[:5].iterrows(): if '/' not in row[0]: # this row", "= data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1", "0}, 2: {'diff': 1, 'log': 0}, 3: {'diff': 2, 'log': 0}, 4: {'diff':", "- FRED, ALFRED, revisions vintages - PCA, approximate factor model, EM algorithm Author:", "\"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if", 
"'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log': 1}} header_", "self.api_key) r = requests_get(url, echo=echo) if r is None: return DataFrame() contents =", "YYYYMM if url is None: then derive subfolder or zip archive name, from", "loaded series Parameters ---------- columns: list of str, default is None subset of", "length of observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s", "C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:]", "df = data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df =", ":r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y] = y[Y] X", "class and methods to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions", "variables/features in columns kmax : int, default is None maximum number of factors.", ": {'id': k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus", "fixed as kmax. Else picks one of three methods in Bai & Ng", "str or int, default 0 (for current.csv) file name relative to base url", "str in {'M', 'A'. 
'Q', 'D', 'Y'} or bool (default is True) resample", "for _ in range(t['log']): df = np.log(df) for _ in range(t['diff']): #df =", "----- See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :]", "= (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False) # increase in R2", "'converge', 'n_iter' \"\"\" X = X.copy() # passed by reference Y = np.isnan(X)", "or int, default 0 (for current.csv) file name relative to base url or", "is 0 number of times to take difference pct_change : bool whether to", "lnvar = np.log(np.where(var > 0, var, 1e-26)) NT2 = (N * T)/(N +", "df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release,", "= df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df * t['annualize'] # by adding", "(2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1) y", "df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in", "url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_", "replace with column means X[Y[:, col], col] = np.nanmean(X[:, col]) M = dict()", "https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 =", "'A'. 'Q', 'D', 'Y'} or bool (default is True) resample and replace date", "apply freq : str in {'M', 'A'. 
'Q', 'D', 'Y'} or bool (default", "api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None: url =", "None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if", "McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x)", "a, b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert FRED", "from io import StringIO import pickle import zipfile import re import xml.etree.ElementTree as", "New label to rename returned series release : pd.DateOffset or int (default is", "to retrieve full observations of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id,", "If 0, number of factors is fixed as kmax. Else picks one of", "Ago x(t) - x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1) * 100", "FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url", "from McCracken FRED-MD site Parameters ---------- vintage : str or int, default 0", "to take log diff : int, default is 0 number of times to", "in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns or", "+ 1)]) mR2 = mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, -", "== (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release", "in c: return None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', [])", "start=start, end=end) for s in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_)", "isinstance(release, int): # keep latest up to max release 
df['release'] = df.groupby('date').cumcount() df", "if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller", "['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500', \"S&P's Common Stock Price Index:", "0, number of factors is fixed as kmax. Else picks one of three", "['S&P: indust']: s = Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date']", "def header(self, series_id, column='title'): \"\"\"Returns a column from last meta record of a", "adding one factor component values are the incremental R2 for the variable in", "isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage", "np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df", "or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release up through", "True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1,", "T observations/samples in rows, N variables/features in columns p : int in [1,", "(((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 # cch = Continuously Compounded Rate", "\"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'],", "'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12},", "requests_get(url, echo=echo) if r is None: return DataFrame() v = json.loads(r.content) df =", "import StringIO import pickle import zipfile import re import xml.etree.ElementTree as ET import", "of url, local file path or zipfile archive Returns ------- df : DataFrame", "= np.isnan(X) # identify missing entries assert(not np.any(np.all(Y, axis=1))) # no row can", "str meta[label] = row[1:].astype(int).to_dict() # as dict of int codes df = df[df.iloc[:,", 
"each period date, optionally indexed by realtime_start Examples -------- \"\"\" df = observations.copy()", "col]) M = dict() # latest fitted model parameters for M['n_iter'] in range(1,", "md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df,", "times to take difference pct_change : bool whether to apply pct_change operator periods", "self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns or keep]", "dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in series_id] series", "Change from Year Ago x(t) - x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1))", "pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0)", "<= min(end, vintage)) & (df.index >= start)] def series(self, series_id, api_key=None, start=None, end=None,", "pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'],", "= df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index >= start)] def series(self,", "PCA, approximate factor model, EM algorithm Author: <NAME> License: MIT \"\"\" import os", "args=\"&\" + args if args else '') r = requests_get(url, echo=echo) return dict()", "c: return None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series']", "# latest fitted model parameters for M['n_iter'] in range(1, n_iter + 1): old", "maximum release number or date offset (inclusive). If 0: latest vintage : int,", "if vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD", "this row has metadata, e.g. 
transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) #", ": int, default is None latest realtime_start date of observations to keep diff,", "returned series release : pd.DateOffset or int (default is 0) maximum release number", "u: T x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T", "Adjusted Annual Rate # SSA = Smoothed Seasonally Adjusted # NA = Not", "= self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit']", "as np import pandas as pd from pandas import DataFrame, Series from pandas.tseries.offsets", "#return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS',", "(2002) Parameters ---------- X : 2D array T observations/samples in rows, N variables/features", "series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif", "annualization factor shift : int, default is 0 number of rows to shift", ": DataFrame input data tcode : int in {1, ..., 7}, default is", "by PCA EM model : dict Model results 'u', 's', 'vT', 'kmax', 'converge',", "FRED, ALFRED, revisions vintages - PCA, approximate factor model, EM algorithm Author: <NAME>", "zipfile archive, or int date YYYYMM url : str, default is None base", "BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors based on Bai &", "[[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert FRED api string format", "_date2int(date): \"\"\"helper method to convert FRED api string format to int date\"\"\" return", "None maximum number of factors. 
If None, set to rank from SVD standardize", "self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data to memory cache from saved", "self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str) \\ else", ">= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data", "end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations of a series as", "***\") def keys(self): \"\"\"Return id names of all loaded series data\"\"\" return list(self.cache_.keys())", "= requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or", "method to convert int date to FRED api string format\"\"\" return ([_int2date(d) for", "'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'})", "in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO',", "((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT =", "series release : pd.DateOffset or int (default is 0) maximum release number or", "or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if", "derive vintage csv file name from input date YYYYMM if url is None:", "api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None: return DataFrame()", "{ k : {'id': k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial", "{series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names of", "'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly", "or DataFrame transformed values, name set to 
label if provided else series_id \"\"\"", "vintage csv file name from input date YYYYMM if url is None: then", "or series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id,", "update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield',", "return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs):", "label if provided else series_id \"\"\" if (series_id not in self.cache_ and not", "Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t))", "max release df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1 == (release or", "of periods to lag for pct_change or diff operator annualize : int. default", "<= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df =", "api_key = None def header(self, series_id, column='title'): \"\"\"Returns a column from last meta", "Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = Compounded Annual Rate of", "in ['S&P: indust']: s = Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA'))", "f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names of all loaded series data\"\"\"", "= 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta", "DataFrame indexed by end-of-month date Notes ----- if vintage is int: then derive", "len(M['s'])-1) if p else kmax or len(M['s'])-1 # \"E\" step: update missing entries", "df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x')", "int length of observations dataframe \"\"\" if types.is_list_like(series_id): 
return [self.get(s, start=start, end=end) for", "factors. If None, set to rank from SVD standardize : bool, default is", "YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date']", "**kwargs): \"\"\"Select from full observations of a series and apply transforms Parameters ----------", "c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset = 0 while", "\"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df =", "Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply", "= self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r", "list of str Labels of series to retrieve start, end : int, default", "# keep latest up to max release df['release'] = df.groupby('date').cumcount() df = df[df['release']", ": str, default is None New label to rename returned series release :", "start=19590101, freq='M'): \"\"\"Retrieve a raw series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls", "* 100 # pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1)", "dict transformation codes Notes ----- lin = Levels (No transformation) [default] chg =", "in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in", "with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage),", "header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for _, row in df.iloc[:5].iterrows(): if", "step X[Y] = y[Y] X = (X * std) + mean # undo", "before processing (works better) Returns ------- r : int best number of 
factors", "7}, default is 1 transformation code freq : str in {'M', 'Q', 'A'},", "in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is None", "= self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or", "freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1)", "value of each period date, optionally indexed by realtime_start Examples -------- \"\"\" df", "__init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access", "isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True,", "columns=None): \"\"\"Return headers (last metadata row) of all loaded series Parameters ---------- columns:", "+ 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else", "end=end or self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values", "not in self.header_: try: if series_id not in self.cache_: # load via api", "'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log': 1}} header_ = {", "- stromg that indicates a data value transformation. 
# lin = Levels (No", "api if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id}", "revisions vintages - PCA, approximate factor model, EM algorithm Author: <NAME> License: MIT", "int, default is None Latest realtime_start date (inclusive) allowed Returns ------- out: Series", "default list of columns to display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1],", "Series or DataFrame transformed values, name set to label if provided else series_id", "\"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI',", "requests_get from .busday import str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url =", "if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert int date to", "np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2 = [0] + list(s**2", "Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 # cch", "y = pca.transform(x) # y = s * u: T x n \"projection\"", "matplotlib.pyplot as plt from pandas.api import types import time from .edgar import requests_get", "freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations Parameters ---------- data : DataFrame", "Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def", "data value transformation. # lin = Levels (No transformation) [default] # chg =", "annualize : int. 
default is 1 annualization factor shift : int, default is", "'realtime_start'] = df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None): c = self.category(category_id,", "from vintage \"\"\" url = url or _fred_md_url if isinstance(vintage, int) and vintage:", "freq=True, **kwargs): \"\"\"Select from full observations of a series and apply transforms Parameters", "to convert FRED api string format to int date\"\"\" return ([_date2int(d) for d", "dict() # latest fitted model parameters for M['n_iter'] in range(1, n_iter + 1):", "------- r : int best number of factors based on ICp{p} criterion, or", "# chg = Change x(t) - x(t-1) # ch1 = Change from Year", "datetime import datetime, date import requests from bs4 import BeautifulSoup from io import", "as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key", "observations/samples in rows, N variables/features in columns kmax : int, default is None", "tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations Parameters ---------- data :", "yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common", "EM algorithm Author: <NAME> License: MIT \"\"\" import os import sys import json", "to FRED api string format\"\"\" return ([_int2date(d) for d in date] if types.is_list_like(date)", "df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve", "series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div", "**kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return", "savefile=None): \"\"\"Load series data to memory cache from saved file\"\"\" with open(savefile or", "Change from Year Ago x(t) - 
x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) -", "DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def", "M = Monthly # BW = Biweekly # W = Weekly # D", "import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import datetime,", "\"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations,", "df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in", "= csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url", "FRED api Parameters ---------- series_id : str or list of str ids of", "= Quarterly # M = Monthly # BW = Biweekly # W =", "< tol: break return X, M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine", "output file\"\"\" with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_)", "freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start']", "number of factors r = BaiNg(X, p, kmax or len(M['s'])-1) if p else", "convert FRED api string format to int date\"\"\" return ([_date2int(d) for d in", "self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id}", "pandas.api import types import time from .edgar import requests_get from .busday import str2date,", "dates log : int, default is 0 number of times to take log", "= np.linalg.svd(X) # auto-select number of factors if p>0 else fix number of", "= Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca =", "md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url", "x = beta.T @ 
beta is covariance matrix \"\"\" if standardize: x =", "- 1) * 100 # cch = Continuously Compounded Rate of Change (ln(x(t))", "Frequency # A = Annual # SA = Semiannual # Q = Quarterly", "McCracken FRED-MD site Parameters ---------- vintage : str or int, default 0 (for", "manipulating retrieved data series Parameters ---------- cache_ : dict cached series and observations", "transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations Parameters ----------", "return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating", "= io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else: df =", "self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000,", "is 1 annualization factor shift : int, default is 0 number of rows", "'') r = requests_get(url, echo=echo) return dict() if r is None else json.loads(r.content)", "subfolder name from vintage \"\"\" url = url or _fred_md_url if isinstance(vintage, int)", "'log': 0}, 'log': {'diff': 0, 'log': 1}} header_ = { k : {'id':", "= { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return", "def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv from McCracken", "= df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label", "\"\"\"Save all memory-cached series data to an output file\"\"\" with open(savefile or self.savefile,", "BW = Biweekly # W = Weekly # D = Daily # Seasonal", "= Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s", "@ u.T @ x)**2, axis=0).reshape(1, - 
1) return mR2 # units - stromg", "0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize':", "Returns ------- r : int best number of factors based on ICp{p} criterion,", "incremental R2 for the variable in the column Notes ----- See <NAME> Ng", "(\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end = 99991231 echo_ = config.ECHO api_key", "config.ECHO api_key = None def header(self, series_id, column='title'): \"\"\"Returns a column from last", "import zipfile import re import xml.etree.ElementTree as ET import matplotlib.pyplot as plt from", "\"\"\"helper method to convert int date to FRED api string format\"\"\" return ([_int2date(d)", "{'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff':", "undo standardization M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if", "SA = Semiannual # Q = Quarterly # M = Monthly # BW", "echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\"", "\"\"\"Return id names of all loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None):", "retrieve full observations of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start", "end=_int2date(end or self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is", "'log': {'diff': 0, 'log': 1}} header_ = { k : {'id': k, 'title':", "['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index", "from adding kth (orthogonal) factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T", "Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr # log = Natural Log", "* n_obs_per_yr # log = Natural Log ln(x(t)) # Frequency # A =", "DataFrame headers of all series loaded \"\"\" df = DataFrame() keep = ['id',", "factor model, EM algorithm Author: <NAME> License: MIT \"\"\" import os import sys", "df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in", "# default list of columns to display for v in self.cache_.values(): df =", "or int, default 0 (i.e. current.csv) file name relative to base url or", "\"file_type=json{args}\").format start = 17760704 end = 99991231 echo_ = config.ECHO api_key = None", "in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if", "(default is True) resample and replace date index with month ends at selected", "return df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to", "np.nanmean(X[:, col]) M = dict() # latest fitted model parameters for M['n_iter'] in", "else series_id \"\"\" if (series_id not in self.cache_ and not self.get(series_id)): return None", "is 0 number of rows to shift output (negative to lag) \"\"\" t", "indexed by end-of-month date Notes ----- if csvfile is int: then derive vintage", "return dict() if r is None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod", "s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id)", "freq is True: freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage,", "= savefile self.cache_ = dict() self.header_ = 
Alfred.header_.copy() self.echo_ = echo def _print(self,", "min(end, vintage)) & (df.index >= start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO):", "else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return s[s.index", "return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in", "= echo def _print(self, *args, echo=None): if echo or self.echo_: print(*args) def load(self,", "list(s**2 / (N * T)) # first case is when no factors used", "self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None:", "0 number of rows to shift output (negative to lag) \"\"\" t =", "int (default is 0) maximum release number or date offset (inclusive). If 0:", "# first case is when no factors used var = (sum(mR2) - np.cumsum(mR2))", "df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest up to max release df['release']", "or PCp3 penalty kmax : int, default is None maximum number of factors.", "of <NAME> (1982), Stock & Watson (1998) and Bai & Ng (2002) Parameters", "up to max release df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1 ==", "* 100 # cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1)))", "return min(r) if len(r) else 0 # first min point def marginalR2(x, kmax=None,", "then standardize data before processing (works better) Returns ------- mR2 : 2D array", "(x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False) # increase in R2 from", "------- n : int length of observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s,", "= dict() for _, row in df.iloc[:5].iterrows(): if '/' not in row[0]: #", "latest release up through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') 
df =", "False if True, then standardize data before processing (works better) Returns ------- r", "fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end", "first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable", "'%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve", "Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's", "requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key)", "\"\"\"Create object, with api_key, for FRED access and data manipulation\"\"\" self.api_key = api_key", "- old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol:", "((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr # log = Natural Log ln(x(t))", "name from input date YYYYMM if url is None: then derive subfolder name", "archive name, from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv',", "series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to update FRED-MD dataset Notes -----", "Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr # log =", "#df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize'])", "details = [tag.get('href').split('/')[-1] for tag in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details", "def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor", "pca.components_[i,:] is 
vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y =", "\"\"\"Classmethod to select a series from alfred observations set Parameters ---------- observations: DataFrame", "M['vT'] = np.linalg.svd(X) # auto-select number of factors if p>0 else fix number", "+ 2)] sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if", "0: latest vintage : int, default is None latest realtime_start date of observations", "pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for _, row in", ": int, default is None Maximum number of factors. If None, set to", "int): # keep latest up to max release df['release'] = df.groupby('date').cumcount() df =", "None base name of url, local file path or zipfile archive Returns -------", "\"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs):", "with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self):", "from bs4 import BeautifulSoup from io import StringIO import pickle import zipfile import", "ids of series to retrieve Returns ------- n : int length of observations", "t.update(kwargs) df = data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df", "row in df.iloc[:5].iterrows(): if '/' not in row[0]: # this row has metadata,", "> 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags", "if vintage is int: then derive vintage csv file name from input date", "= np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge']", "%Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df", "s, vT = np.linalg.svd(x, 
full_matrices=False) # increase in R2 from adding kth (orthogonal)", "np.flatnonzero(sign>0) return min(r) if len(r) else 0 # first min point def marginalR2(x,", "p : int in [0, 1, 2, 3], default is 2 (i.e. 'ICp2'", "p, kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1 # \"E\" step:", "or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def", "if freq is True: freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release,", "FRED-MD site Parameters ---------- vintage : str or int, default 0 (i.e. current.csv)", "parse current or vintage csv from McCracken FRED-MD site Parameters ---------- vintage :", "np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r) else 0 #", "- 1 cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca", "(2002) to auto-determine number in every iteration Returns ------- x : 2D arrayint", "['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)]", "start self.end = end self.savefile = savefile self.cache_ = dict() self.header_ = Alfred.header_.copy()", "if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date'] +=", "# this row has metadata, e.g. 
transform codes label = re.sub(\"[^a-z]\", '', row[0].lower())", "beta.T @ beta is covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1)", "if p else kmax or len(M['s'])-1 # \"E\" step: update missing entries y", "metadata row) of all loaded series Parameters ---------- columns: list of str, default", "as plt from pandas.api import types import time from .edgar import requests_get from", "names of all loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers", "Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate of Change (ln(x(t)) -", "(sum(mR2) - np.cumsum(mR2)) # variance of residuals after k components lnvar = np.log(np.where(var", "self.header_: try: if series_id not in self.cache_: # load via api if not", "Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 # cch = Continuously Compounded", "array T observations/samples in rows, N variables/features in columns p : int in", "+ T) C2 = min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2) /", "1 number of periods to lag for pct_change or diff operator annualize :", "df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <=", "not in self.cache_: # load via api if not in cache self.get(series_id) self.header_[series_id]", "api_key=api_key).get('categories', []) c['series'] = [] offset = 0 while True: s = self.category(category_id,", "label to rename returned series release : pd.DateOffset or int (default is 0)", "\"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\"", "return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id", 
"df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0,", "Parameters ---------- cache_ : dict cached series and observations tcode_ : dict transformation", "Labels of series to retrieve start, end : int, default is None start", "DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date", "and manipulating retrieved data series Parameters ---------- cache_ : dict cached series and", "Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr # log", "to rank from SVD minus 1 p : int in [0, 1, 2,", "= df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\",", "from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca = Compounded Annual", "FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages - PCA, approximate factor model, EM algorithm", "component values are the incremental R2 for the variable in the column Notes", "'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y',", "processing (works better) Returns ------- r : int best number of factors based", "or self.api_key) r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series\",", "factor model and EM algorithm of <NAME> (1982), Stock & Watson (1998) and", "in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if", "display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False)", "Change ((x(t)/x(t-1)) - 1) * 100 # pc1 = Percent Change from Year", "'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div,", "dict() for _, row in df.iloc[:5].iterrows(): if 
'/' not in row[0]: # this", "'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def", "Percent Change ((x(t)/x(t-1)) - 1) * 100 # pc1 = Percent Change from", "echo_ = config.ECHO api_key = None def header(self, series_id, column='title'): \"\"\"Returns a column", "to retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()])", "http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if", "----- See Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize:", "transformation) [default] # chg = Change x(t) - x(t-1) # ch1 = Change", "echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break return X, M def", "a series from alfred observations set Parameters ---------- observations: DataFrame from FRED 'series/observations'", "McCracken FRED-MD site Parameters ---------- vintage : str or int, default 0 (i.e.", "path or zipfile archive Returns ------- df : DataFrame indexed by end-of-month date", "start=None, end=None): \"\"\"Retrieve metadata and full observations of a series with FRED api", "file\"\"\" with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def", "api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data as dict\"\"\" args =", "of a series and apply transforms Parameters ---------- series_id : str or list", "credit to Personal Income'], ['S&P div yield', \"S&P's Composite Common Stock: Dividend Yield\"],", "def popular(self, page=1): \"\"\"Classmethod to web scrape popular series names, by page number\"\"\"", "'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff':", "_print(vintage, 
echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict()", "df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep", "import requests_get from .busday import str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url", "for Alfred/Fred access, and manipulating retrieved data series Parameters ---------- cache_ : dict", "Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log': 0}, 2: {'diff':", "rows, N variables/features in columns kmax : int, default is None Maximum number", "ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) # pch = Percent", "= (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end = 99991231 echo_ = config.ECHO", "self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series}", "99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release up through date", "better) Returns ------- r : int best number of factors based on ICp{p}", "_, row in df.iloc[:5].iterrows(): if '/' not in row[0]: # this row has", "def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata", "df = df * t['annualize'] # by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\"", "offset = 0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not", "'Historical FRED-MD Vintages Final/' + csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage", "@ pca.components_ # \"loadings\" x.T @ x = beta.T @ beta is covariance", "min(r) if 
len(r) else 0 # first min point def marginalR2(x, kmax=None, standardize=False):", "matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x,", "ICp{p} criterion, or 0 if not determined Notes ----- See Bai and Ng", "- 1) return mR2 # units - stromg that indicates a data value", "optionally indexed by realtime_start Examples -------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'],", "latest up to max release df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1", "zipfile archive Returns ------- df : DataFrame indexed by end-of-month date Notes -----", "['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0]", "auto-determine number in every iteration Returns ------- x : 2D arrayint X with", "Vintages Final/' + csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage = csvfile_", "end : int, default is None start and end period dates (inclusive) to", ": int in [0, 1, 2, 3], default is 2 (i.e. 'ICp2' criterion)", "= Change from Year Ago x(t) - x(t-n_obs_per_yr) # pch = Percent Change", "default 0 (i.e. 
current.csv) file name relative to base url or zipfile archive,", "then derive subfolder name from vintage \"\"\" url = url or _fred_md_url if", "% 100:02d}.csv\" if vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ =", "return ([_date2int(d) for d in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def", "latest vintage : int, default is None latest realtime_start date of observations to", "to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield':", "BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1 # \"E\"", "['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1,", "100 # cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) *", "Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url +", "is None: then derive subfolder or zip archive name, from vintage Examples --------", "Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log': 0}, 2: {'diff': 1,", "0, 'log': 0}, 2: {'diff': 1, 'log': 0}, 3: {'diff': 2, 'log': 0},", "attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags] return details #tags =", "for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means X[Y[:, col], col]", "= _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index >=", "standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False) # increase", "to keep diff, log, pct_change : int number of difference, log and pct_change", "'', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller series by parsing multpl.com", "Seasonally Adjusted # SAAR = Seasonally Adjusted Annual Rate # SSA = 
Smoothed", "- x(t-1) ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) pch =", "= {1: {'diff': 0, 'log': 0}, 2: {'diff': 1, 'log': 0}, 3: {'diff':", "to return Returns ------- df : DataFrame headers of all series loaded \"\"\"", "or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data to memory cache from", "data with factor model and EM algorithm of <NAME> (1982), Stock & Watson", "api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations of a", "types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller series", "retrieve start, end : int, default is None start and end period dates", "end-of-month date Notes ----- if csvfile is int: then derive vintage csv file", "str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred:", "M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of factors if p>0 else", "re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict() # as dict", "u, s, vT = np.linalg.svd(x, full_matrices=False) # increase in R2 from adding kth", "observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in series_id]", "'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() # passed by reference Y", "- x(t-1) # ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) #", "meta[label] = row[1:].astype(int).to_dict() # as dict of int codes df = df[df.iloc[:, 0].str.find('/')", "& Ng (2002) to auto-determine number in every iteration Returns ------- x :", "datetime, date import requests from bs4 import BeautifulSoup from io import StringIO import", "x)**2, axis=0) for k in (np.arange(kmax or len(s)) + 1)]) mR2 = mR2", "in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = 
df.groupby('Date').mean().iloc[:,0] elif series_id", "-------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df", "# W = Weekly # D = Daily # Seasonal Adjustment # SA", "X = X.copy() # passed by reference Y = np.isnan(X) # identify missing", "ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages - PCA, approximate factor", "echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access and data manipulation\"\"\" self.api_key =", "retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url", "Q = Quarterly # M = Monthly # BW = Biweekly # W", "_int2date(date): \"\"\"helper method to convert int date to FRED api string format\"\"\" return", "ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1]) r", "of header columns to return Returns ------- df : DataFrame headers of all", "types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in series_id] series = self.series(series_id, api_key=api_key,", "= adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq),", "= np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax", "str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq)", "based on ICp{p} criterion, or 0 if not determined Notes ----- See Bai", "self.echo_ = echo def _print(self, *args, echo=None): if echo or self.echo_: print(*args) def", "label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of a series and apply", "None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape popular series", "'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags]", "is 
False if True, then standardize data before processing (works better) Returns -------", "= Levels (No transformation) [default] chg = Change x(t) - x(t-1) ch1 =", "= _fred_md_url if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage // 100}-{vintage %", "df.columns = df.columns.str.rstrip('x') meta = dict() for _, row in df.iloc[:5].iterrows(): if '/'", "0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change':", "api_key, for FRED access and data manipulation\"\"\" self.api_key = api_key self.start = start", "- 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] -", "True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0,", "break c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO,", "retrieve Returns ------- n : int length of observations dataframe \"\"\" if types.is_list_like(series_id):", "# cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100", "operator annualize : int. 
default is 1 annualization factor shift : int, default", "load(self, savefile=None): \"\"\"Load series data to memory cache from saved file\"\"\" with open(savefile", "= df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end']", "(df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int)", "'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin':", "end-of-month date Notes ----- if vintage is int: then derive vintage csv file", "[] offset = 0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if", "= self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset = 0 while True:", "kmax. 
Else picks one of three methods in Bai & Ng (2002) to", "Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca = Compounded Annual Rate of", "= json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred by", "'n_iter' \"\"\" X = X.copy() # passed by reference Y = np.isnan(X) #", "2: {'diff': 1, 'log': 0}, 3: {'diff': 2, 'log': 0}, 4: {'diff': 0,", "@classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a", "def _print(self, *args, echo=None): if echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load", "'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log':", "c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category", "k in (np.arange(kmax or len(s)) + 1)]) mR2 = mR2 / np.mean((u @", "Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod", "False if True, then standardize data before processing (works better) Returns ------- mR2", "Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip')", "from .busday import str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/'", "'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns to display", "M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d}", "period date, optionally indexed by realtime_start Examples -------- \"\"\" df = observations.copy() df['value']", ".busday import str2date, to_monthend import config # From 
https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def", ": int, default is None start and end period dates (inclusive) to keep", "> 0] # keep rows with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y',", "Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P div yield', \"S&P's", "data before processing (works better) Returns ------- mR2 : 2D array each row", "f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'})", "\"E\" step X[Y] = y[Y] X = (X * std) + mean #", "for pct_change or diff operator annualize : int. default is 1 annualization factor", "fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO',", "release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] =", "std # standardize # \"M\" step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X)", "lag for pct_change or diff operator annualize : int. 
default is 1 annualization", "'Q', 'A'}, default is None set periodicity of dates log : int, default", "column from last meta record of a series\"\"\" if series_id not in self.header_:", "freq=None): \"\"\"Classmethod to select a series from alfred observations set Parameters ---------- observations:", "# D = Daily # Seasonal Adjustment # SA = Seasonally Adjusted #", "'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw", "+ 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_ else: csvfile_ =", "def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full", "Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil,", "df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return", "------- df : DataFrame headers of all series loaded \"\"\" df = DataFrame()", "100:02d}.csv\" if vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical", "= np.log(np.where(var > 0, var, 1e-26)) NT2 = (N * T)/(N + T)", "M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if", "* 100 # pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr))", "by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables", ".drop_duplicates('date', keep='first') else: # else latest release up through date offset df['release'] =", "json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape popular series names, by", "(X - mean) / std # 
standardize # \"M\" step: estimate factors M['u'],", "'log': 0}, 3: {'diff': 2, 'log': 0}, 4: {'diff': 0, 'log': 1}, 5:", "0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log': 0},", "end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0,", "= DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v", "in R2 from adding kth (orthogonal) factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k]", "----- if csvfile is int: then derive vintage csv file name from input", "offset (inclusive). If 0: latest vintage : int, default is None latest realtime_start", "alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231,", "keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for", "# \"E\" step X[Y] = y[Y] X = (X * std) + mean", "self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self,", "all missing assert(not np.any(np.all(Y, axis=0))) # no column can be all missing for", "else: csvfile_ = 'monthly/' + csvfile_ vintage = csvfile_ else: vintage = vintage", "def _date2int(date): \"\"\"helper method to convert FRED api string format to int date\"\"\"", "x(t) - x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1", ": str or int, default 0 (i.e. 
current.csv) file name relative to base", "keep diff, log, pct_change : int number of difference, log and pct_change operations", "ln(x(t-1))) * 100) * n_obs_per_yr # log = Natural Log ln(x(t)) # Frequency", "end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of a", "if series is None or series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations(", "algorithm of <NAME> (1982), Stock & Watson (1998) and Bai & Ng (2002)", "def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data", "'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M':", "DataFrame transformed values, name set to label if provided else series_id \"\"\" if", "retrieved data series Parameters ---------- cache_ : dict cached series and observations tcode_", "= c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset =", "url or url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as", "# pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1)", "\"\"\" import os import sys import json import io import numpy as np", "better) Returns ------- mR2 : 2D array each row corresponds to adding one", "self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] =", "@ M['vT'][:r, :] # \"E\" step X[Y] = y[Y] X = (X *", "SA = Seasonally Adjusted # NSA = Not Seasonally Adjusted # SAAR =", "% 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0)", "all series loaded \"\"\" df = DataFrame() keep = 
['id', 'observation_start', 'observation_end', 'frequency_short',", "start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo)", "'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS':", "return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv", "series_id in ['S&P: indust']: s = Series() elif series_id in ['CLAIMS']: df =", "None def header(self, series_id, column='title'): \"\"\"Returns a column from last meta record of", "f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names", "from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url +", "[tag.get('value') for tag in tags] #return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER',", "2D array T observations/samples in rows, N variables/features in columns kmax : int,", "def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in", "---------- cache_ : dict cached series and observations tcode_ : dict transformation codes", "df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True)", "DataFrame input data tcode : int in {1, ..., 7}, default is 1", "# convert fred to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values", "can be all missing for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column", "EM algorithm of <NAME> (1982), Stock & Watson (1998) and Bai & Ng", "identify missing entries assert(not np.any(np.all(Y, axis=1))) # no row can be all missing", "csvfile_ = 
'monthly/' + csvfile_ vintage = csvfile_ else: vintage = vintage or", "r = BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1", "components lnvar = np.log(np.where(var > 0, var, 1e-26)) NT2 = (N * T)/(N", "df = df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize']) - 1 #", "and observations tcode_ : dict transformation codes Notes ----- lin = Levels (No", "from FRED 'series/observations' api call release : pd.DateOffset or int (default is 0)", "#mR2 = np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s),", "vintage = csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url =", "numpy as np import pandas as pd from pandas import DataFrame, Series from", "shift : int, default is 0 number of rows to shift output (negative", "data manipulation\"\"\" self.api_key = api_key self.start = start self.end = end self.savefile =", "start=start, end=end, echo=self.echo_) if series is None or series.empty: return 0 self.cache_[series_id] =", "df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value']", "as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id,", "mean) / std # standardize # \"M\" step: estimate factors M['u'], M['s'], M['vT']", "YYYYMM if url is None: then derive subfolder name from vintage \"\"\" url", "of a series with FRED api Parameters ---------- series_id : str or list", "periodicity of dates log : int, default is 0 number of times to", "list of columns to display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True)", "------- x : 2D arrayint X with nan's replaced by PCA EM model", "full observations of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or", "standardize : bool, 
default is False if True, then standardize data before processing", "start, end : int, default is None start and end period dates (inclusive)", "assert(not np.any(np.all(Y, axis=1))) # no row can be all missing assert(not np.any(np.all(Y, axis=0)))", "is 2 (i.e. 'ICp2' criterion) If 0, number of factors is fixed as", "name of url, local file path or zipfile archive Returns ------- df :", "kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable from incrementally adding factors Parameters", "mR2 : 2D array each row corresponds to adding one factor component values", "popular(self, page=1): \"\"\"Classmethod to web scrape popular series names, by page number\"\"\" assert(page", "0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca':", "as f: df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns", "load via api if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return", "If None, set to rank from SVD minus 1 p : int in", "= x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax", "return X, M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors", "requests_get(url, echo=echo) if r is None: return DataFrame() contents = json.loads(r.content) df =", "<NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is", "Latest realtime_start date (inclusive) allowed Returns ------- out: Series value of each period", "\"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF',", "# post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int) and vintage: csvfile_ =", "DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 
'seasonal_adjustment_short', 'units_short'] # default", "0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0, 'pct_change': True,", "+= pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)):", "series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata as dataframe\"\"\"", "to web scrape popular series names, by page number\"\"\" assert(page > 0) url", "if csvfile is int: then derive vintage csv file name from input date", "'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s = Series() elif", ": 2D array T observations/samples in rows, N variables/features in columns kmax :", "1) * 100 # pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) -", "wrapper to retrieve full observations of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\",", "if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return", "self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or", "Ng (2002) Parameters ---------- X : 2D array T observations/samples in rows, N", "= multpl(v) elif series_id in self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq)", "of str ids of series to retrieve Returns ------- n : int length", "{'diff': 1, 'log': 1}, 6: {'diff': 2, 'log': 1}, 7: {'diff': 1, 'log':", "k components lnvar = np.log(np.where(var > 0, var, 1e-26)) NT2 = (N *", "return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or", ": DataFrame indexed by end-of-month date Notes ----- if vintage is int: then", "values(self, columns=None): \"\"\"Return headers (last metadata row) of all loaded series Parameters 
----------", "compounding for _ in range(t['log']): df = np.log(df) for _ in range(t['diff']): #df", "json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self, series_id, api_key=None,", "= np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax or len(s))", "(1998) and Bai & Ng (2002) Parameters ---------- X : 2D array T", "0: latest vintage : int, default is None Latest realtime_start date (inclusive) allowed", "freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0)", "json import io import numpy as np import pandas as pd from pandas", "None Maximum number of factors. If None, set to rank from SVD minus", "data : DataFrame input data tcode : int in {1, ..., 7}, default", ": int, default is 0 number of times to take difference pct_change :", "if isinstance(release, int): # keep latest up to max release df['release'] = df.groupby('date').cumcount()", "0 (i.e. 
current.csv) file name relative to base url or zipfile archive, or", "kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors based on Bai & Ng (2002)", "x(t-1) # ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) # pch", "None subset of header columns to return Returns ------- df : DataFrame headers", "vintage csv from McCracken FRED-MD site Parameters ---------- vintage : str or int,", "api call release : pd.DateOffset or int (default is 0) maximum release number", "** (n_obs_per_yr)) - 1) * 100 # cch = Continuously Compounded Rate of", "fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD", "kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if args", "keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release up through date offset df['release']", "std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X - mean) / std", "Ng (2002) to auto-determine number in every iteration Returns ------- x : 2D", "not in self.cache_ and not self.get(series_id)): return None if freq is True: freq", "axis=0) for k in (np.arange(kmax or len(s)) + 1)]) mR2 = mR2 /", "= row[1:].astype(int).to_dict() # as dict of int codes df = df[df.iloc[:, 0].str.find('/') >", ": DataFrame headers of all series loaded \"\"\" df = DataFrame() keep =", "= df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize']) -", "'3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of", "a raw series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller =", "<= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int):", "from input date YYYYMM if url is None: then derive 
subfolder or zip", "Personal Income'], ['S&P div yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE", "(inclusive) to keep label : str, default is None New label to rename", "12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12},", "= pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date'] +=", "Ago x(t) - x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) - 1) *", "'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to update", "self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\"", "np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] <", "realtime_start Examples -------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] =", "csvfile is int: then derive vintage csv file name from input date YYYYMM", "Composite Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings", "observations of a series with FRED api Parameters ---------- series_id : str or", "in every iteration Returns ------- x : 2D arrayint X with nan's replaced", "u.T @ x)**2, axis=0).reshape(1, - 1) return mR2 # units - stromg that", "= fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt =", "else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for", "default is 2 (i.e. 
'ICp2' criterion) If 0, number of factors is fixed", "df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with", "'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to update FRED-MD", "# identify missing entries assert(not np.any(np.all(Y, axis=1))) # no row can be all", "types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper", "names, by page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content", "criterion) If 0, number of factors is fixed as kmax. Else picks one", "= _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage))", "from alfred observations set Parameters ---------- observations: DataFrame from FRED 'series/observations' api call", ": str or int, default 0 (for current.csv) file name relative to base", "header columns to return Returns ------- df : DataFrame headers of all series", "(No transformation) [default] chg = Change x(t) - x(t-1) ch1 = Change from", "kmax : int, default is None Maximum number of factors. If None, set", "Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]}", "Parameters ---------- vintage : str or int, default 0 (i.e. 
current.csv) file name", "None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if", "= (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1]) r =", "True) resample and replace date index with month ends at selected freqs Returns", "list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata row) of all loaded series", "if url is None: then derive subfolder name from vintage \"\"\" url =", "If 0: latest vintage : int, default is None Latest realtime_start date (inclusive)", "df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\"", "return mR2 # units - stromg that indicates a data value transformation. 
#", "Notes ----- lin = Levels (No transformation) [default] chg = Change x(t) -", "df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta)", "log = Natural Log ln(x(t)) # Frequency # A = Annual # SA", "s = self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2,", "Annual # SA = Semiannual # Q = Quarterly # M = Monthly", "with column means X[Y[:, col], col] = np.nanmean(X[:, col]) M = dict() #", "from Year Ago x(t) - x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1)", "or 'monthly/current.csv' _print(vintage, echo=echo) url = url or url_ if url.endswith('.zip'): if url.startswith('http'):", "'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False,", "echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data to memory cache", "from Year Ago x(t) - x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) -", "Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI", "number of rows to shift output (negative to lag) \"\"\" t = {'periods':1,", "# SA = Seasonally Adjusted # NSA = Not Seasonally Adjusted # SAAR", "= df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def", "if provided else series_id \"\"\" if (series_id not in self.cache_ and not self.get(series_id)):", "url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int) and", "Continuously Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log =", "See Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x", "'New Orders for Durable 
Goods'], ['S&P 500', \"S&P's Common Stock Price Index: Composite\"],", "-1) u, s, vT = np.linalg.svd(x, full_matrices=False) # increase in R2 from adding", "tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags]", "@ x = beta.T @ beta is covariance matrix \"\"\" if standardize: x", "through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\", "else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url or url_", "100}-{vintage % 100:02d}.csv\" if vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_", "yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s =", "p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors based on Bai & Ng", "return None if freq is True: freq = self.header(series_id, 'frequency_short') df = self.as_series(", "if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else:", "Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual Rate", "= df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest up to max release", "or self.start, end=end or self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start']", "details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE':", "def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from", "= self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is None or series.empty: return", "'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index for", "all memory-cached series data to an output file\"\"\" with open(savefile or self.savefile, 'wb')", "keep='first') else: # else latest release up through date offset df['release'] = (df['date']", "return [self.get(s, start=start, end=end) for s in series_id] series = self.series(series_id, api_key=api_key, start=start,", "default is None subset of header columns to return Returns ------- df :", "in df.iloc[:5].iterrows(): if '/' not in row[0]: # this row has metadata, e.g.", "= soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags] return details", "pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta =", "\"E\" step: update missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r,", "transformation. # lin = Levels (No transformation) [default] # chg = Change x(t)", "default is None base name of url, local file path or zipfile archive", "_fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if", "'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI':", "+ mean # undo standardization M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2)", "k : {'id': k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper", "Returns ------- df : DataFrame indexed by end-of-month date Notes ----- if vintage", "reference Y = np.isnan(X) # identify missing entries assert(not np.any(np.all(Y, axis=1))) # no", "Semiannual # Q = Quarterly # M = Monthly # BW = Biweekly", "and parse current or vintage csv from McCracken FRED-MD site Parameters ---------- vintage", "d in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method", "popular 
series names, by page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data", "pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import", "methods to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages -", "Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = Compounded Annual Rate of Change", "and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv'", "n_iter + 1): old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1)", "Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual Rate of Change (ln(x(t))", "mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X - mean) /", "array each row corresponds to adding one factor component values are the incremental", "end self.savefile = savefile self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_ = echo", "QuarterEnd from datetime import datetime, date import requests from bs4 import BeautifulSoup from", "NT2, np.log(C2) / C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)]", "2D arrayint X with nan's replaced by PCA EM model : dict Model", "Change (ln(x(t)) - ln(x(t-1))) * 100 # cca = Continuously Compounded Annual Rate", "for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231,", "that indicates a data value transformation. 
# lin = Levels (No transformation) [default]", "- 1) * 100 pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) -", "df.diff(periods=t['periods']) df = df * t['annualize'] # by adding return df.shift(t['shift']) alfred_api =", "in the column Notes ----- See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/", "range(1, n_iter + 1): old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1,", "Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1)", "1): old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X =", "\"\"\"Return marginal R2 of each variable from incrementally adding factors Parameters ---------- x", "BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in", "0, 'log': 1}} header_ = { k : {'id': k, 'title': v} for", ": 2D arrayint X with nan's replaced by PCA EM model : dict", "= (N * T)/(N + T) C2 = min(N, T) penalty = [np.log(NT2)", "url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else:", "url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_ else: csvfile_", "cca = Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100)", "up through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <=", "io import numpy as np import pandas as pd from pandas import DataFrame,", "in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]]))", "kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1 # \"E\" step: update", "as kmax. 
Else picks one of three methods in Bai & Ng (2002)", "np.flatnonzero(np.any(Y, axis=0)): # replace with column means X[Y[:, col], col] = np.nanmean(X[:, col])", "self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to", "or len(M['s'])-1) if p else kmax or len(M['s'])-1 # \"E\" step: update missing", "is None set periodicity of dates log : int, default is 0 number", "= pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for _, row", "release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of a series", "with nan's replaced by PCA EM model : dict Model results 'u', 's',", "# NSA = Not Seasonally Adjusted # SAAR = Seasonally Adjusted Annual Rate", "row) of all loaded series Parameters ---------- columns: list of str, default is", "soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] =", "\"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE',", "covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT =", "int, default 0 (for current.csv) file name relative to base url or zipfile", "import re import xml.etree.ElementTree as ET import matplotlib.pyplot as plt from pandas.api import", "Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit", "..., 7}, default is 1 transformation code freq : str in {'M', 'Q',", "***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self): \"\"\"Return id names of all", "X[Y[:, col], col] = np.nanmean(X[:, col]) M = dict() # latest fitted model", "N variables/features in columns kmax : int, default is None maximum number of", 
"observations of a series and apply transforms Parameters ---------- series_id : str or", "from pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime", "as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k", "1) * 100 # cch = Continuously Compounded Rate of Change (ln(x(t)) -", "df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date']", "Adjusted # SAAR = Seasonally Adjusted Annual Rate # SSA = Smoothed Seasonally", "df = np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods'])", "fred to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start']", "elif series_id in self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq) if isinstance(v,", "X : 2D array T observations/samples in rows, N variables/features in columns kmax", "for s in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series", "**kwargs) def _int2date(date): \"\"\"helper method to convert int date to FRED api string", "None or series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start,", "string format\"\"\" return ([_int2date(d) for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for", "= y[Y] X = (X * std) + mean # undo standardization M['kmax']", "else kmax or len(M['s'])-1 # \"E\" step: update missing entries y = M['u'][:,", "---------- x : 2D array T observations/samples in rows, N variables/features in columns", "Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None): if echo or self.echo_: print(*args)", "realtime: s = self.transform(df['value'], 
**kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value':", "default is 0 number of rows to shift output (negative to lag) \"\"\"", "from input date YYYYMM if url is None: then derive subfolder name from", "values are the incremental R2 for the variable in the column Notes -----", "M['n_iter'] in range(1, n_iter + 1): old = X.copy() mean, std = X.mean(axis=0).reshape(1,", "codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict()", "sys import json import io import numpy as np import pandas as pd", "'%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df =", "passed by reference Y = np.isnan(X) # identify missing entries assert(not np.any(np.all(Y, axis=1)))", "YearEnd, QuarterEnd from datetime import datetime, date import requests from bs4 import BeautifulSoup", "if alfred_mode: # convert fred to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start'])", "df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date']", "// 100}-{vintage % 100:02d}.csv\" if vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip'", "= df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def", "mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax or", "if not determined Notes ----- See Bai and Ng (2002) and McCracken at", "apply pct_change operator periods : int, default is 1 number of periods to", "metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0,", "date offset 
df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date',", "p>0 else fix number of factors r = BaiNg(X, p, kmax or len(M['s'])-1)", "observations/samples in rows, N variables/features in columns p : int in [1, 2,", "relative to base url or zipfile archive, or int date YYYYMM url :", "N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False)", "date to FRED api string format\"\"\" return ([_int2date(d) for d in date] if", "import os import sys import json import io import numpy as np import", "self.start = start self.end = end self.savefile = savefile self.cache_ = dict() self.header_", "import io import numpy as np import pandas as pd from pandas import", "0].str.find('/') > 0] # keep rows with valid date df.index = str2date(df.iloc[:, 0],", ".append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release up through date offset", "-1), X.std(axis=0).reshape(1, -1) X = (X - mean) / std # standardize #", "(2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N", "= (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] =", ": DataFrame indexed by end-of-month date Notes ----- if csvfile is int: then", "Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and", "df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df", "((1 + df) ** t['annualize']) - 1 # by compounding for _ in", "parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" 
soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables =", "'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log':", "set to label if provided else series_id \"\"\" if (series_id not in self.cache_", "'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def", "= str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def", "# \"M\" step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number", "row has metadata, e.g. transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify", "\"\"\"Select from full observations of a series and apply transforms Parameters ---------- series_id", "{1: {'diff': 0, 'log': 0}, 2: {'diff': 1, 'log': 0}, 3: {'diff': 2,", "v = adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1],", "or len(s)) + 1)]) mR2 = mR2 / np.mean((u @ u.T @ x)**2,", "# weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL',", "date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:],", "+= QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']:", "= [0] + list(s**2 / (N * T)) # first case is when", "= fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage,", "not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self, category_id, api=\"category\",", "csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url or", "# auto-select number of factors if p>0 else fix number of factors r", "for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in 
[[0,4],", "data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details", "cache_ : dict cached series and observations tcode_ : dict transformation codes Notes", "Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def transform(self,", "time series transformations Parameters ---------- data : DataFrame input data tcode : int", "self.savefile = savefile self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_ = echo def", "FRED access and data manipulation\"\"\" self.api_key = api_key self.start = start self.end =", "{'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff':", "s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset +=", "set to rank from SVD standardize : bool, default is False if True,", "str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller series by parsing multpl.com web", "\"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock:", "nan's replaced by PCA EM model : dict Model results 'u', 's', 'vT',", "log diff : int, default is 0 number of times to take difference", "factors based on ICp{p} criterion, or 0 if not determined Notes ----- See", "periods : int, default is 1 number of periods to lag for pct_change", "data to an output file\"\"\" with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_,", "{'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True,", "np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or", "BeautifulSoup from io import StringIO import pickle import zipfile import re import xml.etree.ElementTree", "vintage: csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage < 201500: 
url_", "start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of", "r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url,", "offset += s['limit'] return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API", "meta = dict() for _, row in df.iloc[:5].iterrows(): if '/' not in row[0]:", "for United States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500', \"S&P's Common", "= self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c: return None c =", "MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date import requests from bs4 import", "* t['annualize'] # by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api", "in tags] #return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500',", "np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x = beta.T @ beta is", ":] # \"E\" step X[Y] = y[Y] X = (X * std) +", "default is False if True, then standardize data before processing (works better) Returns", "= Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100 # cca", "def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations of a", "T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x,", "print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break return X, M def BaiNg(x,", "df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: #", "True, then standardize data before processing (works better) Returns ------- r : 
int", "str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if", "str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO,", "- x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 #", "self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r", "url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD site", "if True, then standardize data before processing (works better) Returns ------- mR2 :", "1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log': 1,", "FRED-MD Vintages Final/' + csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage =", "derive subfolder or zip archive name, from vintage Examples -------- md_df, mt =", "for M['n_iter'] in range(1, n_iter + 1): old = X.copy() mean, std =", "# BW = Biweekly # W = Weekly # D = Daily #", "s = (self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq)))", "series data to memory cache from saved file\"\"\" with open(savefile or self.savefile, 'rb')", "or url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f:", "if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self, category_id,", "for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df", "in self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str) \\", "elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0]", "{'diff': 
2, 'log': 0}, 4: {'diff': 0, 'log': 1}, 5: {'diff': 1, 'log':", "pct_change or diff operator annualize : int. default is 1 annualization factor shift", "# M = Monthly # BW = Biweekly # W = Weekly #", "x(t-1) ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) pch = Percent", "Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 #", "str or list of str Labels of series to retrieve start, end :", "'Q', 'D', 'Y'} or bool (default is True) resample and replace date index", ": str or list of str Labels of series to retrieve start, end", "= r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g}", "r = np.flatnonzero(sign>0) return min(r) if len(r) else 0 # first min point", "default is None latest realtime_start date of observations to keep diff, log, pct_change", "2, 3], default is 2 use PCp1 or PCp2 or PCp3 penalty kmax", "page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup =", "'date'] return df def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if", "s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label", "_int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date',", "\"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index:", "Notes ----- if csvfile is int: then derive vintage csv file name from", "len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id,", "((x(t)/x(t-1)) - 1) * 100 # pc1 = Percent Change from Year Ago", "__call__(self, series_id, 
start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full", "int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage =", "self.end = end self.savefile = savefile self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_", "x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or", "(release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest release up", "print(*args) def load(self, savefile=None): \"\"\"Load series data to memory cache from saved file\"\"\"", "DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred by backfilling realtime_start f =", "Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual", "in range(1, n_iter + 1): old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1),", "Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series", "echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None,", "import BeautifulSoup from io import StringIO import pickle import zipfile import re import", "1:], DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating retrieved data", "series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata row) of", "freqs Returns ------- Series or DataFrame transformed values, name set to label if", "used var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals after k components", "number of factors if p>0 else fix number of factors r = BaiNg(X,", "difference, log and pct_change operations to apply freq : str in 
{'M', 'A'.", "url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key)", "output (negative to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs)", "observations set Parameters ---------- observations: DataFrame from FRED 'series/observations' api call release :", "self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations", "n : int length of observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start,", "Natural Log ln(x(t)) # Frequency # A = Annual # SA = Semiannual", "'units_short'] # default list of columns to display for v in self.cache_.values(): df", "**kwargs): \"\"\"Classmethod to apply time series transformations Parameters ---------- data : DataFrame input", "pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ =", "1, 2, 3], default is 2 (i.e. 
'ICp2' criterion) If 0, number of", "Notes ----- See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i,", "Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P:", "vT = np.linalg.svd(x, full_matrices=False) # increase in R2 from adding kth (orthogonal) factor", "number in every iteration Returns ------- x : 2D arrayint X with nan's", "df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end,", "cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100 #", "= Monthly # BW = Biweekly # W = Weekly # D =", "= mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1) return mR2", "'Nonrevolving consumer credit to Personal Income'], ['S&P div yield', \"S&P's Composite Common Stock:", "clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None,", "---------- X : 2D array T observations/samples in rows, N variables/features in columns", "df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize']) - 1 # by compounding", "Seasonally Adjusted Annual Rate # SSA = Smoothed Seasonally Adjusted # NA =", "missing data with factor model and EM algorithm of <NAME> (1982), Stock &", "PCA EM model : dict Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter'", "int date\"\"\" return ([_date2int(d) for d in date] if types.is_list_like(date) else int(re.sub('\\D', '',", "* 100) * n_obs_per_yr # log = Natural Log ln(x(t)) # Frequency #", "df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date df.index = str2date(df.iloc[:,", "of rows to shift output (negative to lag) \"\"\" t = {'periods':1, 'shift':0,", "and EM algorithm of <NAME> (1982), Stock & Watson (1998) and Bai &", "then derive subfolder 
or zip archive name, from vintage Examples -------- md_df, mt", "ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s = Series() elif series_id in", "= self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12,", "np.cumsum(mR2)) # variance of residuals after k components lnvar = np.log(np.where(var > 0,", "u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax or len(s)) + 1)]) mR2", "3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'], ['HWI',", "api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c", "to apply freq : str in {'M', 'A'. 'Q', 'D', 'Y'} or bool", "shiller series by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content,", "to Personal Income'], ['S&P div yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P", "name from input date YYYYMM if url is None: then derive subfolder or", "[6,8]])) def _date2int(date): \"\"\"helper method to convert FRED api string format to int", "or self.api_key) r = requests_get(url, echo=echo) if r is None: return DataFrame() contents", "stromg that indicates a data value transformation. 
# lin = Levels (No transformation)", "beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x = beta.T @", "start = 17760704 end = 99991231 echo_ = config.ECHO api_key = None def", "= end self.savefile = savefile self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_ =", "with month ends at selected freqs Returns ------- Series or DataFrame transformed values,", "0 # first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of", "entries assert(not np.any(np.all(Y, axis=1))) # no row can be all missing assert(not np.any(np.all(Y,", "pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor model", "df : DataFrame headers of all series loaded \"\"\" df = DataFrame() keep", "12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log': 1}} header_ =", "try: if series_id not in self.cache_: # load via api if not in", "bool whether to apply pct_change operator periods : int, default is 1 number", "series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and", "= Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca", "return df[(df.index <= min(end, vintage)) & (df.index >= start)] def series(self, series_id, api_key=None,", "= Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded", "from incrementally adding factors Parameters ---------- x : 2D array T observations/samples in", "'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff':", "+ csvfile_ vintage = csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo)", "in columns kmax : int, default is None maximum number of factors. 
If", "(for current.csv) file name relative to base url or zipfile archive, or int", "default is 1 transformation code freq : str in {'M', 'Q', 'A'}, default", "{'diff': 1, 'log': 0}, 3: {'diff': 2, 'log': 0}, 4: {'diff': 0, 'log':", "'%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False):", "observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0,", "'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change':", "= DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred by backfilling realtime_start f", "be all missing assert(not np.any(np.all(Y, axis=0))) # no column can be all missing", "date (inclusive) allowed Returns ------- out: Series value of each period date, optionally", "ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock Price", "# increase in R2 from adding kth (orthogonal) factor as a regressor mR2", "vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip')", "= Semiannual # Q = Quarterly # M = Monthly # BW =", "'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or self.end,", "1}, 5: {'diff': 1, 'log': 1}, 6: {'diff': 2, 'log': 1}, 7: {'diff':", "= requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details =", "- 1) * 100 pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr -", "+= YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']:", "DataFrame from FRED 'series/observations' api call release : pd.DateOffset or int (default is", "'log': 1}, 5: {'diff': 1, 'log': 1}, 6: {'diff': 
2, 'log': 1}, 7:", "0) maximum release number or date offset (inclusive). If 0: latest vintage :", "df = df.set_index('id', drop=False) return df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231,", "columns to display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df =", "DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False,", "if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None,", "url, local file path or zipfile archive Returns ------- df : DataFrame indexed", "web scrape popular series names, by page number\"\"\" assert(page > 0) url =", "or zipfile archive Returns ------- df : DataFrame indexed by end-of-month date Notes", "adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2],", "Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust',", "self.api_key = api_key self.start = start self.end = end self.savefile = savefile self.cache_", "url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key)", "500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', #", "def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or vintage csv from McCracken", "column Notes ----- See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is", "series and observations tcode_ : dict transformation codes Notes ----- lin = Levels", "df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self, series_id, 
api_key=None, start=None,", "import requests from bs4 import BeautifulSoup from io import StringIO import pickle import", "int, default is None maximum number of factors. If None, set to rank", "to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float)", "for tag in tags] #return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P", "n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log':", "= self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if args else '')", "Stock Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to", "[Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve", "penalty kmax : int, default is None maximum number of factors. 
If None,", "Adjustment # SA = Seasonally Adjusted # NSA = Not Seasonally Adjusted #", "M['vT'][:r, :] # \"E\" step X[Y] = y[Y] X = (X * std)", "(\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start", "= Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None): if echo or self.echo_:", "end=99991231, freq=None): \"\"\"Classmethod to select a series from alfred observations set Parameters ----------", "int, default is 0 number of rows to shift output (negative to lag)", "ln(x(t)) # Frequency # A = Annual # SA = Semiannual # Q", "method to convert FRED api string format to int date\"\"\" return ([_date2int(d) for", "SVD standardize : bool, default is False if True, then standardize data before", "1) * 100 # pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) **", "@ beta is covariance matrix \"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u,", "or 0 if not determined Notes ----- See Bai and Ng (2002) and", "ln(x(t-1))) cca = Continuously Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1))) *", "and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1) y =", "0 number of times to take difference pct_change : bool whether to apply", "in range(t['log']): df = np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df", "[Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self,", "break return X, M def BaiNg(x, p=2, 
kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of", "vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage,", "Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date import", "= config.ECHO api_key = None def header(self, series_id, column='title'): \"\"\"Returns a column from", "series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None)", "args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or", "1, 'log': 0}, 3: {'diff': 2, 'log': 0}, 4: {'diff': 0, 'log': 1},", "True, then standardize data before processing (works better) Returns ------- mR2 : 2D", "processing (works better) Returns ------- mR2 : 2D array each row corresponds to", "'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA',", "1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1,", "and replace date index with month ends at selected freqs Returns ------- Series", "echo=echo) if r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r", "auto-select number of factors if p>0 else fix number of factors r =", "From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging", "return df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object,", "0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None,", "keys(self): \"\"\"Return id names of all loaded series data\"\"\" 
return list(self.cache_.keys()) def values(self,", "= requests_get(url, echo=echo) if r is None: return DataFrame() v = json.loads(r.content) df", "2D array each row corresponds to adding one factor component values are the", "** t['annualize']) - 1 # by compounding for _ in range(t['log']): df =", "{'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log':", "'shiller-pe'} if series_id in ['S&P: indust']: s = Series() elif series_id in ['CLAIMS']:", "0 if not determined Notes ----- See Bai and Ng (2002) and McCracken", "- ln(x(t-1))) * 100 # cca = Continuously Compounded Annual Rate of Change", "requests from bs4 import BeautifulSoup from io import StringIO import pickle import zipfile", "def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series", "at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2", "= soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date']", "Change x(t) - x(t-1) ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr)", "self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def keys(self):", "series_id not in self.cache_: # load via api if not in cache self.get(series_id)", "transforms Parameters ---------- series_id : str or list of str Labels of series", "shift output (negative to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode])", "end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\",", "= [tag.get('value') for tag in tags] #return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO':", "df.iloc[:, 1:], DataFrame(meta) class 
Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating retrieved", "wrapper to retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or", "1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1,", "Final/' + csvfile_ else: csvfile_ = 'monthly/' + csvfile_ vintage = csvfile_ else:", ": int, default is None maximum number of factors. If None, set to", "backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return", "current.csv) file name relative to base url or zipfile archive, or int date", "3: {'diff': 2, 'log': 0}, 4: {'diff': 0, 'log': 1}, 5: {'diff': 1,", "= BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax or len(M['s'])-1 #", "int date to FRED api string format\"\"\" return ([_int2date(d) for d in date]", "'monthly/current.csv' _print(vintage, echo=echo) url = url or url_ if url.endswith('.zip'): if url.startswith('http'): url", ".drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df =", "& Ng (2002) Parameters ---------- X : 2D array T observations/samples in rows,", "regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax", "of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual Rate of Change", "of each period date, optionally indexed by realtime_start Examples -------- \"\"\" df =", "(n_obs_per_yr)) - 1) * 100 # cch = Continuously Compounded Rate of Change", "0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break", "\"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" 
\"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704", "= self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or self.end, freq=freq) if", "to retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start),", "FRED 'series/observations' api call release : pd.DateOffset or int (default is 0) maximum", "maximum number of factors. If None, set to rank from SVD standardize :", "AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P", "df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest up to max", "Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month", "factor component values are the incremental R2 for the variable in the column", "# Frequency # A = Annual # SA = Semiannual # Q =", "if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\" if", "'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns to display for v", "Rate of Change (ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual Rate of", "& (df.index >= start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper", "_ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df *", "WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial", "# \"E\" step: update missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @", "# no row can be all missing assert(not np.any(np.all(Y, axis=0))) # no column", "marginal R2 of each variable from 
incrementally adding factors Parameters ---------- x :", "can be all missing assert(not np.any(np.all(Y, axis=0))) # no column can be all", "print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert int date to FRED api", "of factors r = BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax", "is None: then derive subfolder name from vintage \"\"\" url = url or", "12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0,", "self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None):", "MIT \"\"\" import os import sys import json import io import numpy as", "= Levels (No transformation) [default] # chg = Change x(t) - x(t-1) #", "= json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self, series_id,", "if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r =", "is 0) maximum release number or date offset (inclusive). If 0: latest vintage", "= 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo:", ": 2D array each row corresponds to adding one factor component values are", "columns kmax : int, default is None Maximum number of factors. 
If None,", "data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api,", "\"\"\"Base class for Alfred/Fred access, and manipulating retrieved data series Parameters ---------- cache_", "df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index >= start)] def series(self, series_id,", "'%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class for", "date Notes ----- if csvfile is int: then derive vintage csv file name", "0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch':", "headers of all series loaded \"\"\" df = DataFrame() keep = ['id', 'observation_start',", "in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if", "series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r = requests_get(url,", ": pd.DateOffset or int (default is 0) maximum release number or date offset", "echo=echo) if r is None: return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess'])", "PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s = Series() elif series_id", "r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key", "url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') #", "'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a", "2, 3], default is 2 (i.e. 
'ICp2' criterion) If 0, number of factors", "class Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating retrieved data series Parameters", "range(t['log']): df = np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df =", "= requests_get(url, echo=echo) return dict() if r is None else json.loads(r.content) @classmethod def", "freq='M'): \"\"\"Retrieve a raw series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\"", "missing entries assert(not np.any(np.all(Y, axis=1))) # no row can be all missing assert(not", "Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x =", "# pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100", "ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) pch = Percent Change", "'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M',", "Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating retrieved data series Parameters ----------", "df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index", "end=None): \"\"\"Retrieve metadata and full observations of a series with FRED api Parameters", "1 p : int in [0, 1, 2, 3], default is 2 (i.e.", "self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None):", "tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags] return", "'D', 'Y'} or bool (default is True) resample and replace date index with", "None: then derive subfolder name from vintage \"\"\" url = url or _fred_md_url", "= f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df", 
"factors is fixed as kmax. Else picks one of three methods in Bai", "'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods':", "number of difference, log and pct_change operations to apply freq : str in", "from SVD minus 1 p : int in [0, 1, 2, 3], default", "y[Y] X = (X * std) + mean # undo standardization M['kmax'] =", "f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage < 201500: url_ = url_ +", "fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD", "if '/' not in row[0]: # this row has metadata, e.g. transform codes", "(inclusive). If 0: latest vintage : int, default is None Latest realtime_start date", "'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL',", "Parameters ---------- X : 2D array T observations/samples in rows, N variables/features in", "df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys():", "T observations/samples in rows, N variables/features in columns kmax : int, default is", "return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata for", "int date YYYYMM url : str, default is None base name of url,", "(inclusive) allowed Returns ------- out: Series value of each period date, optionally indexed", "while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess'])", "or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key,", "'%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class", "'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0, 
'pct_change':", "df.groupby('date').cumcount() df = df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date',", "Biweekly # W = Weekly # D = Daily # Seasonal Adjustment #", "at selected freqs Returns ------- Series or DataFrame transformed values, name set to", "'categories' not in c: return None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\",", "Daily # Seasonal Adjustment # SA = Seasonally Adjusted # NSA = Not", "= Percent Change ((x(t)/x(t-1)) - 1) * 100 # pc1 = Percent Change", "self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r =", "drop=False) return df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create", "df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date',", "2, 'log': 0}, 4: {'diff': 0, 'log': 1}, 5: {'diff': 1, 'log': 1},", "series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self,", "p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor model and EM", "url : str, default is None base name of url, local file path", "pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df", "r is None: return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode:", "2, 'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0,", "100 # pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) -", "y = s * u: T x n \"projection\" beta = np.diag(pca.singular_values_) @", "Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial 
Claims'], ['HWIURATIO', 'Ratio of Help", "import types import time from .edgar import requests_get from .busday import str2date, to_monthend", ": int, default is 0 number of times to take log diff :", "Log ln(x(t)) # Frequency # A = Annual # SA = Semiannual #", "1e-26)) NT2 = (N * T)/(N + T) C2 = min(N, T) penalty", "return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self,", "= Change from Year Ago x(t) - x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1))", "Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca =", "return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags] #return", "row[0]: # this row has metadata, e.g. transform codes label = re.sub(\"[^a-z]\", '',", "self.api_key, args=\"&\" + args if args else '') r = requests_get(url, echo=echo) return", "start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with", "url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r", "((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr", "3], default is 2 use PCp1 or PCp2 or PCp3 penalty kmax :", "'/' not in row[0]: # this row has metadata, e.g. 
transform codes label", "api string format to int date\"\"\" return ([_date2int(d) for d in date] if", "_fred_md_url if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\"", "pd.DateOffset or int (default is 0) maximum release number or date offset (inclusive).", "default is None start and end period dates (inclusive) to keep label :", "- np.cumsum(mR2)) # variance of residuals after k components lnvar = np.log(np.where(var >", "range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df * t['annualize'] #", "{'diff': 0, 'log': 0}, 2: {'diff': 1, 'log': 0}, 3: {'diff': 2, 'log':", "rename returned series release : pd.DateOffset or int (default is 0) maximum release", "'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() # passed by", "* std) + mean # undo standardization M['kmax'] = r M['converge'] = np.sum((X", "_print(vintage, echo=echo) url = url or url_ if url.endswith('.zip'): if url.startswith('http'): url =", "each variable from incrementally adding factors Parameters ---------- x : 2D array T", "v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS',", "((x(t)/x(t-1)) - 1) * 100 pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr))", "alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations of a series as dataframe\"\"\"", "of dates log : int, default is 0 number of times to take", "= df.columns.str.rstrip('x') meta = dict() for _, row in df.iloc[:5].iterrows(): if '/' not", "self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or self.end, freq=freq) if realtime: s", "s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing", "end=end, echo=self.echo_) if series is None or series.empty: return 0 self.cache_[series_id] = {", "log, pct_change : int number 
of difference, log and pct_change operations to apply", "not self.get(series_id)): return None if freq is True: freq = self.header(series_id, 'frequency_short') df", "missing for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means X[Y[:, col],", "Bai & Ng (2002) to auto-determine number in every iteration Returns ------- x", "= min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) /", "is None start and end period dates (inclusive) to keep label : str,", "* T)/(N + T) C2 = min(N, T) penalty = [np.log(NT2) / NT2,", "access, and manipulating retrieved data series Parameters ---------- cache_ : dict cached series", "'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log':", "= pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0])", "Levels (No transformation) [default] # chg = Change x(t) - x(t-1) # ch1", "or _fred_md_url if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage %", "if r is None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web", "ALFRED, revisions vintages - PCA, approximate factor model, EM algorithm Author: <NAME> License:", "not determined Notes ----- See Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/", "of factors if p>0 else fix number of factors r = BaiNg(X, p,", "v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns", "np.any(np.all(Y, axis=0))) # no column can be all missing for col in np.flatnonzero(np.any(Y,", "kmax or len(s)) mR2 = [0] + list(s**2 / (N * T)) #", "standardize data before processing (works better) Returns ------- mR2 : 2D array each", "1}, 6: {'diff': 2, 'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change': True},", "= X.copy() # passed by reference Y = np.isnan(X) # identify missing 
entries", "'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id,", "= start self.end = end self.savefile = savefile self.cache_ = dict() self.header_ =", "if freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']:", "df.set_index('id', drop=False) return df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO):", "= f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class':", "Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') #", "= Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) *", "[tag.get('href').split('/')[-1] for tag in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value')", "increase in R2 from adding kth (orthogonal) factor as a regressor mR2 =", "(inclusive). If 0: latest vintage : int, default is None latest realtime_start date", "= [] offset = 0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset)", "'%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not", "Returns ------- df : DataFrame headers of all series loaded \"\"\" df =", "a data value transformation. 
# lin = Levels (No transformation) [default] # chg", "\\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return", "---------- data : DataFrame input data tcode : int in {1, ..., 7},", "n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x =", "{'M', 'Q', 'A'}, default is None set periodicity of dates log : int,", "of all loaded series Parameters ---------- columns: list of str, default is None", "[default] # chg = Change x(t) - x(t-1) # ch1 = Change from", "label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict() #", "a column from last meta record of a series\"\"\" if series_id not in", "FRED api string format to int date\"\"\" return ([_date2int(d) for d in date]", "dump(self, savefile=None): \"\"\"Save all memory-cached series data to an output file\"\"\" with open(savefile", "operations to apply freq : str in {'M', 'A'. 'Q', 'D', 'Y'} or", "{'M', 'A'. 
'Q', 'D', 'Y'} or bool (default is True) resample and replace", "QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date']", "x : 2D array T observations/samples in rows, N variables/features in columns p", "factors based on Bai & Ng (2002) criterion Parameters ---------- x : 2D", "not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return", "is 1 number of periods to lag for pct_change or diff operator annualize", "freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill", "Change x(t) - x(t-1) # ch1 = Change from Year Ago x(t) -", "None, set to rank from SVD standardize : bool, default is False if", "self.api_key) r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id,", "= (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end =", "if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce')", "if series_id not in self.cache_: # load via api if not in cache", "\"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id", "variables/features in columns kmax : int, default is None Maximum number of factors.", "@classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape popular series names, by page", "/ std # standardize # \"M\" step: estimate factors M['u'], M['s'], M['vT'] =", "u, s, vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2", "(orthogonal) factor as a regressor mR2 = 
np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0)", "x(t) - x(t-1) # ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr)", "'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M':", "vintage : int, default is None Latest realtime_start date (inclusive) allowed Returns -------", "variable in the column Notes ----- See <NAME> Ng (2002) and McCracken at", "{'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log': 1}} header_ = { k", "Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr #", "to auto-determine number in every iteration Returns ------- x : 2D arrayint X", "int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller series by parsing", "Bai & Ng (2002) criterion Parameters ---------- x : 2D array T observations/samples", "#details = [tag.get('value') for tag in tags] #return details fred_adjust = {'HWI': 'JTSJOL',", "df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and", "- 1 # by compounding for _ in range(t['log']): df = np.log(df) for", "for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" +", "column means X[Y[:, col], col] = np.nanmean(X[:, col]) M = dict() # latest", "= 99991231 echo_ = config.ECHO api_key = None def header(self, series_id, column='title'): \"\"\"Returns", "0}, 'log': {'diff': 0, 'log': 1}} header_ = { k : {'id': k,", "or self.api_key) r = requests_get(url, echo=echo) if r is None: return DataFrame() v", "resample and replace date index with month ends at selected freqs Returns -------", "int, default is None Maximum number of factors. 
If None, set to rank", "None: return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return", "after k components lnvar = np.log(np.where(var > 0, var, 1e-26)) NT2 = (N", "{'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log': 1,", "axis=0).reshape(1, - 1) return mR2 # units - stromg that indicates a data", "(works better) Returns ------- mR2 : 2D array each row corresponds to adding", "'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series", "(last metadata row) of all loaded series Parameters ---------- columns: list of str,", "'series/observations' api call release : pd.DateOffset or int (default is 0) maximum release", "of three methods in Bai & Ng (2002) to auto-determine number in every", "file name relative to base url or zipfile archive, or int date YYYYMM", "url or _fred_md_url if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage // 100}-{vintage", "is 0 number of times to take log diff : int, default is", "page=1): \"\"\"Classmethod to web scrape popular series names, by page number\"\"\" assert(page >", "lin = Levels (No transformation) [default] chg = Change x(t) - x(t-1) ch1", "0}, 'ch1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0,", "1) * 100 pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1", "def values(self, columns=None): \"\"\"Return headers (last metadata row) of all loaded series Parameters", "picks one of three methods in Bai & Ng (2002) to auto-determine number", "= np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2 = [0] +", "self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None:", "= np.linalg.svd(x, full_matrices=False) # increase in R2 from adding kth (orthogonal) factor as", "# y = s * 
u: T x n \"projection\" beta = np.diag(pca.singular_values_)", "'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() # passed by reference Y =", "kmax : int, default is None maximum number of factors. If None, set", "category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data as", "#tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags] #return details fred_adjust", "archive Returns ------- df : DataFrame indexed by end-of-month date Notes ----- if", "1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch':", "Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus", "api_key=api_key, start=start, end=end, echo=self.echo_) if series is None or series.empty: return 0 self.cache_[series_id]", "(df.index >= start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to", "indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None,", "'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101,", "default is 1 annualization factor shift : int, default is 0 number of", "dates (inclusive) to keep label : str, default is None New label to", "latest fitted model parameters for M['n_iter'] in range(1, n_iter + 1): old =", "row can be all missing assert(not np.any(np.all(Y, axis=0))) # no column can be", "or self.api_key, args=\"&\" + args if args else '') r = requests_get(url, echo=echo)", "a series and apply transforms Parameters ---------- series_id : str or list of", "memory-cached series data to an output file\"\"\" with open(savefile or self.savefile, 'wb') as", "col] = np.nanmean(X[:, col]) M = dict() # latest fitted model parameters for", "and FRED-MD/FRED-QD - FRED, ALFRED, 
revisions vintages - PCA, approximate factor model, EM", "echo=ECHO): \"\"\"API wrapper to retrieve full observations of a series as dataframe\"\"\" url", "dict() if r is None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to", "standardize=False): \"\"\"Return marginal R2 of each variable from incrementally adding factors Parameters ----------", "and Bai & Ng (2002) Parameters ---------- X : 2D array T observations/samples", "of difference, log and pct_change operations to apply freq : str in {'M',", "\"loadings\" x.T @ x = beta.T @ beta is covariance matrix \"\"\" if", "df = df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first')", "= np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r) else 0", "np.log(C2) / C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign", "[np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1] ic =", "------- out: Series value of each period date, optionally indexed by realtime_start Examples", "dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or", "<NAME> (1982), Stock & Watson (1998) and Bai & Ng (2002) Parameters ----------", "'log': 0, 'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12},", "\"\"\"Classmethod to web scrape popular series names, by page number\"\"\" assert(page > 0)", "- ln(x(t-1))) * 100) * n_obs_per_yr # log = Natural Log ln(x(t)) #", "self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached", "self.get(series_id)): return None if freq is True: freq = self.header(series_id, 'frequency_short') df =", "\"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to", 
"R2 of each variable from incrementally adding factors Parameters ---------- x : 2D", "date import requests from bs4 import BeautifulSoup from io import StringIO import pickle", "\"\"\"API wrapper to retrieve full observations of a series as dataframe\"\"\" url =", "model parameters for M['n_iter'] in range(1, n_iter + 1): old = X.copy() mean,", "start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access and data", "if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] +=", "= M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y] =", "if M['converge'] < tol: break return X, M def BaiNg(x, p=2, kmax=None, standardize=False,", ": int best number of factors based on ICp{p} criterion, or 0 if", "* 100 pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch", "for d in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper", "np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y] = y[Y] X = (X", "Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate", "{'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF',", "assert(not np.any(np.all(Y, axis=0))) # no column can be all missing for col in", "# keep rows with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index", "# simplify label str meta[label] = row[1:].astype(int).to_dict() # as dict of int codes", "= soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags] #return details fred_adjust =", "'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns to display for v in", "offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', 
keep='last')", "_print(self, *args, echo=None): if echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series", "Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving", "df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df * t['annualize'] # by adding return", "criterion Parameters ---------- x : 2D array T observations/samples in rows, N variables/features", "url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df", "open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save", "import xml.etree.ElementTree as ET import matplotlib.pyplot as plt from pandas.api import types import", "r = requests_get(url, echo=echo) if r is None: return DataFrame() v = json.loads(r.content)", "in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <=", "code freq : str in {'M', 'Q', 'A'}, default is None set periodicity", "[4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert FRED api string format to", "and full observations of a series with FRED api Parameters ---------- series_id :", "SVD minus 1 p : int in [0, 1, 2, 3], default is", "None: return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert", "case is when no factors used var = (sum(mR2) - np.cumsum(mR2)) # variance", "= pca.transform(x) # y = s * u: T x n \"projection\" beta", "Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No.", "adding factors Parameters ---------- x : 2D array T observations/samples in rows, N", "to convert int date to FRED api string format\"\"\" return ([_int2date(d) for d", ": int, default 
is 1 number of periods to lag for pct_change or", "= Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded", "(np.arange(kmax or len(s)) + 1)]) mR2 = mR2 / np.mean((u @ u.T @", "of observations to keep diff, log, pct_change : int number of difference, log", "Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100 # cca =", "freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values", "self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return", "convert int date to FRED api string format\"\"\" return ([_int2date(d) for d in", "**kwargs): \"\"\"API wrapper to retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for", "np.any(np.all(Y, axis=1))) # no row can be all missing assert(not np.any(np.all(Y, axis=0))) #", "axis=0))) # no column can be all missing for col in np.flatnonzero(np.any(Y, axis=0)):", "of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr # log = Natural", "if 'categories' not in c: return None c = c['categories'][0] c['children'] = self.category(category_id,", "vintage \"\"\" url = url or _fred_md_url if isinstance(vintage, int) and vintage: vintage", "Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) * 100) * n_obs_per_yr", "url_ = _fred_md_url if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage // 100}-{vintage", "'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns to display for", "# lin = Levels (No transformation) [default] # chg = Change x(t) -", "keep rows with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index =", "headers (last metadata row) of all loaded series Parameters ---------- columns: list of", "\"\"\" if (series_id not 
in self.cache_ and not self.get(series_id)): return None if freq", "of factors. If None, set to rank from SVD standardize : bool, default", "string format to int date\"\"\" return ([_date2int(d) for d in date] if types.is_list_like(date)", "format to int date\"\"\" return ([_date2int(d) for d in date] if types.is_list_like(date) else", "default is 0 number of times to take log diff : int, default", "to echo debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to", "derive subfolder name from vintage \"\"\" url = url or _fred_md_url if isinstance(vintage,", "diff : int, default is 0 number of times to take difference pct_change", "int, default is None start and end period dates (inclusive) to keep label", "columns kmax : int, default is None maximum number of factors. If None,", "self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r =", "time from .edgar import requests_get from .busday import str2date, to_monthend import config #", "'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df =", "api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None,", "in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert FRED api string", "log = Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log': 0},", "0 (for current.csv) file name relative to base url or zipfile archive, or", "dict of int codes df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows", "echo=None): if echo or self.echo_: print(*args) def load(self, savefile=None): \"\"\"Load series data to", "1}} header_ = { k : {'id': k, 'title': v} for k,v in", "[1, 2, 3], default is 2 use PCp1 or PCp2 or PCp3 penalty", 
"'A'}, default is None set periodicity of dates log : int, default is", "url = url or _fred_md_url if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage", "import pickle import zipfile import re import xml.etree.ElementTree as ET import matplotlib.pyplot as", "np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df =", "memory cache from saved file\"\"\" with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f))", "and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N =", "first case is when no factors used var = (sum(mR2) - np.cumsum(mR2)) #", "import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper", "observations: DataFrame from FRED 'series/observations' api call release : pd.DateOffset or int (default", "api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations of a series with FRED", "factors used var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals after k", "PCp2 or PCp3 penalty kmax : int, default is None maximum number of", "diff, log, pct_change : int number of difference, log and pct_change operations to", "= X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X - mean) / std #", "tcode_ = {1: {'diff': 0, 'log': 0}, 2: {'diff': 1, 'log': 0}, 3:", "Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index for United States'], ['AMDMNO', 'New Orders for", "Adjusted # NSA = Not Seasonally Adjusted # SAAR = Seasonally Adjusted Annual", "std) + mean # undo standardization M['kmax'] = r M['converge'] = np.sum((X -", "best number of factors based on ICp{p} criterion, or 0 if not determined", "series is None or series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id,", "the incremental R2 for the variable in the column Notes ----- See <NAME>", "bs4 import BeautifulSoup from io import StringIO import pickle import zipfile import re", "penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1]", "value transformation. # lin = Levels (No transformation) [default] # chg = Change", "in rows, N variables/features in columns kmax : int, default is None maximum", "pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca", "to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages - PCA,", "df : DataFrame indexed by end-of-month date Notes ----- if vintage is int:", "c['series'] = [] offset = 0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key,", "release number or date offset (inclusive). 
If 0: latest vintage : int, default", "+ 1): old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X", "= to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x:", "ET import matplotlib.pyplot as plt from pandas.api import types import time from .edgar", "list of str, default is None subset of header columns to return Returns", "df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self,", "1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else: # else latest", "echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert int date to FRED", "= df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse", "= (X - mean) / std # standardize # \"M\" step: estimate factors", "class for Alfred/Fred access, and manipulating retrieved data series Parameters ---------- cache_ :", "date\"\"\" return ([_date2int(d) for d in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10])))", "Returns ------- out: Series value of each period date, optionally indexed by realtime_start", "date offset (inclusive). If 0: latest vintage : int, default is None Latest", "step: update missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :]", ": dict cached series and observations tcode_ : dict transformation codes Notes -----", "[0, 1, 2, 3], default is 2 (i.e. 
'ICp2' criterion) If 0, number", "= (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format", "x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 # pc1", "min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p", "for the variable in the column Notes ----- See <NAME> Ng (2002) and", ": dict Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X =", "-------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015", "echo def _print(self, *args, echo=None): if echo or self.echo_: print(*args) def load(self, savefile=None):", "when no factors used var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals", "import pandas as pd from pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd,", "{'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff':", "(default is 0) maximum release number or date offset (inclusive). If 0: latest", "If 0: latest vintage : int, default is None latest realtime_start date of", "api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data as dict\"\"\" args", "default is None New label to rename returned series release : pd.DateOffset or", "_fred_md_url if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\"", "types import time from .edgar import requests_get from .busday import str2date, to_monthend import", "'Ratio of Help Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index for United States'], ['AMDMNO',", "mR2 = mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1) return", "---------- vintage : str or int, default 0 (for current.csv) file name relative", "data to memory cache from saved file\"\"\" with open(savefile or self.savefile, 'rb') as", "United States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500', \"S&P's Common Stock", "'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO':", "pct_change operations to apply freq : str in {'M', 'A'. 'Q', 'D', 'Y'}", "in [1, 2, 3], default is 2 use PCp1 or PCp2 or PCp3", "end period dates (inclusive) to keep label : str, default is None New", "url = url or url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with", "------- df : DataFrame indexed by end-of-month date Notes ----- if vintage is", "np.linalg.svd(X) # auto-select number of factors if p>0 else fix number of factors", "transformation) [default] chg = Change x(t) - x(t-1) ch1 = Change from Year", "df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <=", "(N * T)) # first case is when no factors used var =", "dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE", "---------- series_id : str or list of str ids of series to retrieve", "number of factors. 
If None, set to rank from SVD minus 1 p", ":] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y = s * u:", "series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations of a series with", "or vintage csv from McCracken FRED-MD site Parameters ---------- vintage : str or", "self.start, end=end or self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] =", "R2 for the variable in the column Notes ----- See <NAME> Ng (2002)", "vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of a series and", "f: df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns =", "row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict() # as dict of int", "'pct_change': True}, 'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff':", "kth (orthogonal) factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2,", "t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']:", "t['annualize']) - 1 # by compounding for _ in range(t['log']): df = np.log(df)", "in rows, N variables/features in columns kmax : int, default is None Maximum", "np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1) return mR2 # units -", "site Parameters ---------- vintage : str or int, default 0 (for current.csv) file", "\"\"\"Load series data to memory cache from saved file\"\"\" with open(savefile or self.savefile,", "data series Parameters ---------- cache_ : dict cached series and observations tcode_ :", "import matplotlib.pyplot as plt from pandas.api import types import time from .edgar import", ".edgar import requests_get from .busday import str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/", "offset 
(inclusive). If 0: latest vintage : int, default is None Latest realtime_start", "fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv',", "return None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] =", "category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end = 99991231 echo_ =", "EM model : dict Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\"", "{r}\") if M['converge'] < tol: break return X, M def BaiNg(x, p=2, kmax=None,", "Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'],", "= self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or self.api_key) r", "%d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df):", "page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df =", "return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full", "Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1 = Percent Change from Year", "Weekly # D = Daily # Seasonal Adjustment # SA = Seasonally Adjusted", "@ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step X[Y] = y[Y] X =", "col], col] = np.nanmean(X[:, col]) M = dict() # latest fitted model parameters", "types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve", "else fix number 
of factors r = BaiNg(X, p, kmax or len(M['s'])-1) if", "vintage is int: then derive vintage csv file name from input date YYYYMM", "if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break return X, M", "and apply transforms Parameters ---------- series_id : str or list of str Labels", "alfred observations set Parameters ---------- observations: DataFrame from FRED 'series/observations' api call release", "fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int)", "columns to return Returns ------- df : DataFrame headers of all series loaded", "assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml')", "None if freq is True: freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'],", "'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1':", "'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id):", "to shift output (negative to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1}", "and data manipulation\"\"\" self.api_key = api_key self.start = start self.end = end self.savefile", "ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log': 0}, 2: {'diff': 1, 'log':", "= ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT", "'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log':", "self.api_key) r = requests_get(url, echo=echo) if r is None: return DataFrame() v =", "Seasonal Adjustment # SA = Seasonally Adjusted # NSA = Not Seasonally Adjusted", "div yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite", "number or date offset (inclusive). 
If 0: latest vintage : int, default is", "len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select", "# cca = Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1))) *", "# pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_", "= { k : {'id': k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month", "# \"loadings\" x.T @ x = beta.T @ beta is covariance matrix \"\"\"", "------- df : DataFrame indexed by end-of-month date Notes ----- if csvfile is", "series transformations Parameters ---------- data : DataFrame input data tcode : int in", "0 number of times to take log diff : int, default is 0", "s**2/(T-1) y = pca.transform(x) # y = s * u: T x n", "v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self,", "'Y'} or bool (default is True) resample and replace date index with month", "var, 1e-26)) NT2 = (N * T)/(N + T) C2 = min(N, T)", "standardize # \"M\" step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select", "x(t) - x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) - 1) * 100", "to int date\"\"\" return ([_date2int(d) for d in date] if types.is_list_like(date) else int(re.sub('\\D',", "= vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url or url_ if url.endswith('.zip'):", "= Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0, 'log': 0}, 2:", "T x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @", "rows to shift output (negative to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False,", "csv from McCracken FRED-MD site Parameters ---------- vintage : str or int, default", "['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal", "os import 
sys import json import io import numpy as np import pandas", "DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or vintage csv from", "tol: break return X, M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number", "df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id]", "csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage < 201500: url_ =", "series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations", "\"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in series_id] series =", "or self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None:", "is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo)", ": int. 
default is 1 annualization factor shift : int, default is 0", "in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if", "by realtime_start Examples -------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date']", "is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo)", "'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log':", "[default] chg = Change x(t) - x(t-1) ch1 = Change from Year Ago", "1}, 7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0},", "of a series\"\"\" if series_id not in self.header_: try: if series_id not in", "0}, 3: {'diff': 2, 'log': 0}, 4: {'diff': 0, 'log': 1}, 5: {'diff':", "'realtime_start']) if isinstance(release, int): # keep latest up to max release df['release'] =", "apply transforms Parameters ---------- series_id : str or list of str Labels of", "Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca = Compounded", "means X[Y[:, col], col] = np.nanmean(X[:, col]) M = dict() # latest fitted", "df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df", "is 1 transformation code freq : str in {'M', 'Q', 'A'}, default is", "to lag for pct_change or diff operator annualize : int. 
default is 1", "(ln(x(t)) - ln(x(t-1))) * 100 # cca = Continuously Compounded Annual Rate of", "License: MIT \"\"\" import os import sys import json import io import numpy", "or int date YYYYMM url : str, default is None base name of", "https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\"", "self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c: return None c = c['categories'][0]", "X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X - mean) / std # standardize", "take difference pct_change : bool whether to apply pct_change operator periods : int,", "c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c: return None c", "columns: list of str, default is None subset of header columns to return", "((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca = Compounded Annual Rate of Change", "tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor model and EM algorithm", "= Weekly # D = Daily # Seasonal Adjustment # SA = Seasonally", "for Durable Goods'], ['S&P 500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail", "+= MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']:", "default is 0 number of times to take difference pct_change : bool whether", "of factors. 
If None, set to rank from SVD minus 1 p :", "def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to update FRED-MD dataset", "None set periodicity of dates log : int, default is 0 number of", "Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper", "file\"\"\" with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self,", "Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 # pca = Compounded Annual Rate", "tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d')", "model : dict Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X", "re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage", "to select a series from alfred observations set Parameters ---------- observations: DataFrame from", "- PCA, approximate factor model, EM algorithm Author: <NAME> License: MIT \"\"\" import", "str or list of str ids of series to retrieve Returns ------- n", "if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df =", "99991231 echo_ = config.ECHO api_key = None def header(self, series_id, column='title'): \"\"\"Returns a", "as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series data", "offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self,", "or len(M['s'])-1 # \"E\" step: update missing entries y = M['u'][:, :r] @", "(\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" 
\"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end = 99991231", "release df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1 == (release or 99999999)]\\", "6: {'diff': 2, 'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin':", "int in [0, 1, 2, 3], default is 2 (i.e. 'ICp2' criterion) If", "{'id': k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month", "Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'], ['HWI', 'Help Wanted Index for United", "date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]])) def", "\"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start =", "df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:,", "zip archive name, from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages", "simplify label str meta[label] = row[1:].astype(int).to_dict() # as dict of int codes df", "Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate of Change", ": int in [1, 2, 3], default is 2 use PCp1 or PCp2", "api_key=api_key or self.api_key, args=\"&\" + args if args else '') r = requests_get(url,", "100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns", "requests_get(url, echo=echo) return dict() if r is None else json.loads(r.content) @classmethod def popular(self,", ": str in {'M', 'Q', 'A'}, default is None set periodicity of dates", "X.std(axis=0).reshape(1, -1) X = 
(X - mean) / std # standardize # \"M\"", "api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset = 0 while True: s =", "web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df", "api Parameters ---------- series_id : str or list of str ids of series", "- ln(x(t-1))) cca = Continuously Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1)))", "Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"],", "> 0, var, 1e-26)) NT2 = (N * T)/(N + T) C2 =", "soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0],", "number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data,", "arrayint X with nan's replaced by PCA EM model : dict Model results", "url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f,", "'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log':", "self.cache_ and not self.get(series_id)): return None if freq is True: freq = self.header(series_id,", "Not Seasonally Adjusted # SAAR = Seasonally Adjusted Annual Rate # SSA =", "lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index()", "Parameters ---------- x : 2D array T observations/samples in rows, N variables/features in", "kmax or len(M['s'])-1 # \"E\" step: update missing entries y = M['u'][:, :r]", "Year Ago x(t) - x(t-n_obs_per_yr) # pch = Percent Change ((x(t)/x(t-1)) - 1)", "\"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 
17760704 end = 99991231 echo_ = config.ECHO api_key =", "= min(len(s), kmax or len(s)) mR2 = [0] + list(s**2 / (N *", "100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage),", "= Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1 = Percent Change from", "'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL': 'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M',", "of Help Wanted/No. Unemployed'], ['HWI', 'Help Wanted Index for United States'], ['AMDMNO', 'New", "Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P div", "1) return mR2 # units - stromg that indicates a data value transformation.", "subfolder or zip archive name, from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical", "cache from saved file\"\"\" with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return", "True}, 'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff':", "messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert int date", "Levels (No transformation) [default] chg = Change x(t) - x(t-1) ch1 = Change", "= Continuously Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log", "realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df", "to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df =", "by end-of-month date Notes ----- if vintage is int: then derive vintage csv", "100 # cca = Continuously Compounded Annual Rate of Change ((ln(x(t)) - ln(x(t-1)))", "io import StringIO import pickle import zipfile import re import xml.etree.ElementTree as ET", "from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = 
Compounded Annual Rate", "of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate of Change (ln(x(t))", "\"\"\"API wrapper to retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start", "is None latest realtime_start date of observations to keep diff, log, pct_change :", "\"\"\"Determine number of factors based on Bai & Ng (2002) criterion Parameters ----------", "default is None maximum number of factors. If None, set to rank from", "fitted model parameters for M['n_iter'] in range(1, n_iter + 1): old = X.copy()", "self.cache_: # load via api if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1]", "access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages - PCA, approximate", "X = (X * std) + mean # undo standardization M['kmax'] = r", "alfred_mode: # convert fred to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) &", "'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M',", "manipulation\"\"\" self.api_key = api_key self.start = start self.end = end self.savefile = savefile", "columns p : int in [1, 2, 3], default is 2 use PCp1", "D = Daily # Seasonal Adjustment # SA = Seasonally Adjusted # NSA", "\"\"\"helper to echo debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method", "import sys import json import io import numpy as np import pandas as", "df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred", "series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None: return", "factors. 
If None, set to rank from SVD minus 1 p : int", "corresponds to adding one factor component values are the incremental R2 for the", "* 100 pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) *", "# by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\"", "Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = Compounded", "StringIO import pickle import zipfile import re import xml.etree.ElementTree as ET import matplotlib.pyplot", "@classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations", "of each variable from incrementally adding factors Parameters ---------- x : 2D array", "r is None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape", "method to retrieve shiller series by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\"", "minus 1 p : int in [0, 1, 2, 3], default is 2", "date YYYYMM url : str, default is None base name of url, local", "self.api_key) r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series\", series_id=series_id,", "2 (i.e. 'ICp2' criterion) If 0, number of factors is fixed as kmax.", "'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca': {'diff': 1, 'log': 1, 'annualize':", "(self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s", "int, default 0 (i.e. 
current.csv) file name relative to base url or zipfile", "from datetime import datetime, date import requests from bs4 import BeautifulSoup from io", "df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda", "data tcode : int in {1, ..., 7}, default is 1 transformation code", "A = Annual # SA = Semiannual # Q = Quarterly # M", "= df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index", "input data tcode : int in {1, ..., 7}, default is 1 transformation", "{'diff': 0, 'log': 1}} header_ = { k : {'id': k, 'title': v}", "or list of str ids of series to retrieve Returns ------- n :", "echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date):", "latest vintage : int, default is None Latest realtime_start date (inclusive) allowed Returns", "----- if vintage is int: then derive vintage csv file name from input", "df = DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred by backfilling realtime_start", "be all missing for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means", "full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2 = [0] + list(s**2 /", "r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key", "no factors used var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals after", "freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date']", "'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int) and vintage: csvfile_", "{'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0,", "def transform(self, data, 
tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations Parameters", "self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self,", "\"\"\" df = DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short',", "series Parameters ---------- columns: list of str, default is None subset of header", "by end-of-month date Notes ----- if csvfile is int: then derive vintage csv", "= df[df['realtime_start'] <= _int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if", "else: # else latest release up through date offset df['release'] = (df['date'] +", "multpl(page): \"\"\"Helper method to retrieve shiller series by parsing multpl.com web page\"\"\" url", "full observations of a series and apply transforms Parameters ---------- series_id : str", "log and pct_change operations to apply freq : str in {'M', 'A'. 
'Q',", "= 0 while True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']:", "for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return", "# first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each", "chg = Change x(t) - x(t-1) ch1 = Change from Year Ago x(t)", "not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return df def fred_md(vintage=0, url=None, echo=config.ECHO):", "start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata as dataframe\"\"\" url =", "= requests_get(url, echo=echo) if r is None: return DataFrame() contents = json.loads(r.content) df", "or zip archive name, from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD", "series_id \"\"\" if (series_id not in self.cache_ and not self.get(series_id)): return None if", "df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None): c =", "(df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def get_category(self, category_id,", "x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 pc1 = Percent", "([_int2date(d) for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in", "= url or url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage)", "parameters for M['n_iter'] in range(1, n_iter + 1): old = X.copy() mean, std", "Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 # cch =", "zipfile import re import xml.etree.ElementTree as ET import matplotlib.pyplot as plt from pandas.api", "'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' 
+ csvfile_ else: csvfile_ = 'monthly/'", "adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api", "+= pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df = df[df['realtime_start'] <= _int2date(vintage)] df['value'] =", "& df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None):", "return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api =", "return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve", "0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags =", "indexed by end-of-month date Notes ----- if vintage is int: then derive vintage", "'s-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']: s = Series()", "(ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_ =", "based on Bai & Ng (2002) criterion Parameters ---------- x : 2D array", "log : int, default is 0 number of times to take log diff", "start=start or self.start, end=end or self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame()", "Durable Goods'], ['S&P 500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and", "if url.endswith('.zip'): if 
url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df =", "x)**2, axis=0).reshape(1, - 1) return mR2 # units - stromg that indicates a", "* T)) # first case is when no factors used var = (sum(mR2)", "Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'], ['HWI', 'Help Wanted", "x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s, vT = np.linalg.svd(x, full_matrices=False) kmax =", "v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return s[s.index >=", "one factor component values are the incremental R2 for the variable in the", "meta record of a series\"\"\" if series_id not in self.header_: try: if series_id", "of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end", "PCp1 or PCp2 or PCp3 penalty kmax : int, default is None maximum", "MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date']", "def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve", "_date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) &", "['S&P PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common", "pct_change : int number of difference, log and pct_change operations to apply freq", "d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6],", "all loaded series Parameters ---------- columns: list of str, default is None subset", "and methods to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages", "{'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0, 
'pct_change': True, 'periods': 12},", "['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"],", "values, name set to label if provided else series_id \"\"\" if (series_id not", "in [0, 1, 2, 3], default is 2 (i.e. 'ICp2' criterion) If 0,", "standardize data before processing (works better) Returns ------- r : int best number", "100 pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100", "self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is None or series.empty: return 0", "file name from input date YYYYMM if url is None: then derive subfolder", "format\"\"\" return ([_int2date(d) for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b] for a,", "= dict() # latest fitted model parameters for M['n_iter'] in range(1, n_iter +", "Rates\"], ['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P div yield', \"S&P's Composite", "is int: then derive vintage csv file name from input date YYYYMM if", "1, 'log': 1}, 6: {'diff': 2, 'log': 1}, 7: {'diff': 1, 'log': 0,", "Maximum number of factors. 
If None, set to rank from SVD minus 1", "NSA = Not Seasonally Adjusted # SAAR = Seasonally Adjusted Annual Rate #", "int best number of factors based on ICp{p} criterion, or 0 if not", "dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key", "7: {'diff': 1, 'log': 0, 'pct_change': True}, 'lin': {'diff': 0, 'log': 0}, 'chg':", "'ch1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log':", "if (series_id not in self.cache_ and not self.get(series_id)): return None if freq is", "archive, or int date YYYYMM url : str, default is None base name", "series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations of", "series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end),", "else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve shiller series by", "'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns to", "in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df * t['annualize']", "allowed Returns ------- out: Series value of each period date, optionally indexed by", "echo=False): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD site Parameters", "savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access and data manipulation\"\"\" self.api_key", "Bai & Ng (2002) Parameters ---------- X : 2D array T observations/samples in", "FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer credit to", "factors if p>0 else fix number of factors r = BaiNg(X, p, kmax", "of str Labels of series 
to retrieve start, end : int, default is", "* u: T x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\"", "series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is None or", "missing assert(not np.any(np.all(Y, axis=0))) # no column can be all missing for col", "def __init__(self, api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED", "weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'], 'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']}", "100 # pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) *", "retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end", "keep latest up to max release df['release'] = df.groupby('date').cumcount() df = df[df['release'] +", "a series\"\"\" if series_id not in self.header_: try: if series_id not in self.cache_:", "and pct_change operations to apply freq : str in {'M', 'A'. 
'Q', 'D',", "name relative to base url or zipfile archive, or int date YYYYMM url", "- ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_ = {1:", "default is 1 number of periods to lag for pct_change or diff operator", "----- lin = Levels (No transformation) [default] chg = Change x(t) - x(t-1)", "to label if provided else series_id \"\"\" if (series_id not in self.cache_ and", "x : 2D arrayint X with nan's replaced by PCA EM model :", "Seasonally Adjusted # NSA = Not Seasonally Adjusted # SAAR = Seasonally Adjusted", "saved file\"\"\" with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def", "Index for United States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500', \"S&P's", "date YYYYMM if url is None: then derive subfolder or zip archive name,", "= DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df def series_observations(self, series_id, api_key=None, start=None, end=None,", "then derive vintage csv file name from input date YYYYMM if url is", "factors Parameters ---------- x : 2D array T observations/samples in rows, N variables/features", "Help Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index for United States'], ['AMDMNO', 'New Orders", "['CONSPI', 'Nonrevolving consumer credit to Personal Income'], ['S&P div yield', \"S&P's Composite Common", "data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time series transformations Parameters ---------- data", "= re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict() # as", "series Parameters ---------- cache_ : dict cached series and observations tcode_ : dict", "index with month ends at selected freqs Returns ------- Series or DataFrame transformed", "codes Notes ----- lin = Levels (No transformation) [default] chg = Change x(t)", "api_key self.start = start self.end = end self.savefile = savefile self.cache_ = dict()", "s in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is", "auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO):", "dict Model results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy()", "by reference Y = np.isnan(X) # identify missing entries assert(not np.any(np.all(Y, axis=1))) #", "df def fred_md(vintage=0, url=None, echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv from", "pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest up", "'periods': 12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log':", "{series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None):", "= Not Seasonally Adjusted # SAAR = Seasonally Adjusted Annual Rate # SSA", "None: then derive subfolder or zip archive name, from vintage Examples -------- md_df,", "int, default is None latest realtime_start date of observations to keep diff, 
log,", "Notes ----- See Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if", "min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable from", "scrape popular series names, by page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\"", "for k in (np.arange(kmax or len(s)) + 1)]) mR2 = mR2 / np.mean((u", "adding kth (orthogonal) factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @", "if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\"", "+ df) ** t['annualize']) - 1 # by compounding for _ in range(t['log']):", "#df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df = df * t['annualize'] # by", "np.log(C2) / NT2, np.log(C2) / C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax", "\"\"\" X = X.copy() # passed by reference Y = np.isnan(X) # identify", "If None, set to rank from SVD standardize : bool, default is False", "from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date import requests", "= df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']: df['date'] += YearEnd(0) if freq.upper()[0]", "and not self.get(series_id)): return None if freq is True: freq = self.header(series_id, 'frequency_short')", "api string format\"\"\" return ([_int2date(d) for d in date] if types.is_list_like(date) else \"-\".join(str(date)[a:b]", "X.copy() # passed by reference Y = np.isnan(X) # identify missing entries assert(not", "[Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to", "for tag in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for", "Y = np.isnan(X) # identify 
missing entries assert(not np.any(np.all(Y, axis=1))) # no row", "SAAR = Seasonally Adjusted Annual Rate # SSA = Smoothed Seasonally Adjusted #", "dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end), api_key=api_key or", "api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c: return None", "Alfred/Fred access, and manipulating retrieved data series Parameters ---------- cache_ : dict cached", "site Parameters ---------- vintage : str or int, default 0 (i.e. current.csv) file", "from .edgar import requests_get from .busday import str2date, to_monthend import config # From", "\"\"\"helper method to convert FRED api string format to int date\"\"\" return ([_date2int(d)", "raw series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P", "realtime_start date of observations to keep diff, log, pct_change : int number of", "url or zipfile archive, or int date YYYYMM url : str, default is", "/ NT2, np.log(C2) / C2][p - 1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax +", "is None subset of header columns to return Returns ------- df : DataFrame", "of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100 # cch = Continuously", "of str, default is None subset of header columns to return Returns -------", "# ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) # pch =", "# From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo", "mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url + 'Historical_FRED-MD.zip') # pre-2015 md_df, mt", "label str meta[label] = row[1:].astype(int).to_dict() # as dict of int codes df =", "import time from .edgar import requests_get from .busday import str2date, to_monthend 
import config", "int. default is 1 annualization factor shift : int, default is 0 number", "0}, 4: {'diff': 0, 'log': 1}, 5: {'diff': 1, 'log': 1}, 6: {'diff':", "0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1] if not types.is_numeric_dtype(df): df = df.map(lambda x: re.sub('[^\\d\\.\\-]','',x)).astype(float) return", "details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags] #return details", "dict() self.header_ = Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None): if echo", "iteration Returns ------- x : 2D arrayint X with nan's replaced by PCA", "full observations of a series with FRED api Parameters ---------- series_id : str", "series_id not in self.header_: try: if series_id not in self.cache_: # load via", "& Watson (1998) and Bai & Ng (2002) Parameters ---------- X : 2D", "& Ng (2002) criterion Parameters ---------- x : 2D array T observations/samples in", "['HWIURATIO', 'Ratio of Help Wanted/No. 
Unemployed'], ['HWI', 'Help Wanted Index for United States'],", "category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c: return", "in columns p : int in [1, 2, 3], default is 2 use", "adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series to update FRED-MD dataset Notes", "keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release'])", "'s', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() # passed by reference", "x(t) - x(t-1) ch1 = Change from Year Ago x(t) - x(t-n_obs_per_yr) pch", "number of times to take log diff : int, default is 0 number", "to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs):", "Parameters ---------- vintage : str or int, default 0 (for current.csv) file name", "Unemployed'], ['HWI', 'Help Wanted Index for United States'], ['AMDMNO', 'New Orders for Durable", "'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1}, 'cca':", "0, var, 1e-26)) NT2 = (N * T)/(N + T) C2 = min(N,", "old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break", "or zipfile archive, or int date YYYYMM url : str, default is None", "for a, b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert", "release : pd.DateOffset or int (default is 0) maximum release number or date", "by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f, 'date']", "variables/features in columns p : int in [1, 2, 3], 
default is 2", "an output file\"\"\" with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f) return", "_int2date(vintage)] df['value'] = pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): #", "file path or zipfile archive Returns ------- df : DataFrame indexed by end-of-month", "start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None,", "of factors is fixed as kmax. Else picks one of three methods in", "['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"],", "diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break return X,", "or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations and", "# replace with column means X[Y[:, col], col] = np.nanmean(X[:, col]) M =", "str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0,", "series from alfred observations set Parameters ---------- observations: DataFrame from FRED 'series/observations' api", "is 2 use PCp1 or PCp2 or PCp3 penalty kmax : int, default", "pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 # pc1 = Percent", "= df.set_index('id', drop=False) return df[columns or keep] def __init__(self, api_key, start=17760704, end=99991231, savefile=None,", ">= start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve", "number of times to take difference pct_change : bool whether to apply pct_change", "selected freqs Returns ------- Series or DataFrame transformed values, name set to label", "default 0 (for current.csv) file name relative to base url or zipfile archive,", 
"1] ic = (lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1])", "model, EM algorithm Author: <NAME> License: MIT \"\"\" import os import sys import", "vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url or url_ if url.endswith('.zip'): if", "freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6)", "_date2int(df['realtime_end']) df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index >= start)]", "= shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys(): v = adjust[series_id] s", "'ICp2' criterion) If 0, number of factors is fixed as kmax. Else picks", "Examples -------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date'])", "valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:,", "M = dict() # latest fitted model parameters for M['n_iter'] in range(1, n_iter", "rank from SVD standardize : bool, default is False if True, then standardize", "name set to label if provided else series_id \"\"\" if (series_id not in", "each row corresponds to adding one factor component values are the incremental R2", "df.index.name = str(datetime.now()) return df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO):", "label : str, default is None New label to rename returned series release", "else 0 # first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2", "\"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD site Parameters ----------", "# SAAR = Seasonally Adjusted Annual Rate # SSA = Smoothed Seasonally Adjusted", "metadata, e.g. 
transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str", "2D array T observations/samples in rows, N variables/features in columns p : int", "a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k in", "self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id})", "in columns kmax : int, default is None Maximum number of factors. If", "if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] +=", "in shiller.keys(): v = shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys(): v", "row corresponds to adding one factor component values are the incremental R2 for", "([_date2int(d) for d in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page):", "# units - stromg that indicates a data value transformation. # lin =", "object, with api_key, for FRED access and data manipulation\"\"\" self.api_key = api_key self.start", "if r is None: return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name", "See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_", "int codes df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid", "---------- vintage : str or int, default 0 (i.e. current.csv) file name relative", "default is None Latest realtime_start date (inclusive) allowed Returns ------- out: Series value", "or diff operator annualize : int. 
default is 1 annualization factor shift :", "{M['converge']:8.3g} {r}\") if M['converge'] < tol: break return X, M def BaiNg(x, p=2,", "'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta =", "np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for k in (np.arange(kmax or len(s)) +", "determined Notes ----- See Bai and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\"", "\"\"\"Classmethod to apply time series transformations Parameters ---------- data : DataFrame input data", "release=release, vintage=vintage, start=start or self.start, end=end or self.end, freq=freq) if realtime: s =", "if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] +=", "tags] #return details fred_adjust = {'HWI': 'JTSJOL', 'AMDMNO': 'DGORDER', 'S&P 500': 'SP500', 'RETAIL':", "df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns or keep] def __init__(self, api_key,", "pct_change : bool whether to apply pct_change operator periods : int, default is", "else '') r = requests_get(url, echo=echo) return dict() if r is None else", "header(self, series_id, column='title'): \"\"\"Returns a column from last meta record of a series\"\"\"", "Parameters ---------- series_id : str or list of str Labels of series to", "freq : str in {'M', 'A'. 
'Q', 'D', 'Y'} or bool (default is", "= Change x(t) - x(t-1) ch1 = Change from Year Ago x(t) -", "DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v =", "csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_ else: csvfile_ = 'monthly/' +", "[['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio", "'Help Wanted Index for United States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P", "DataFrame indexed by end-of-month date Notes ----- if csvfile is int: then derive", "ignore_index=True) df = df.set_index('id', drop=False) return df[columns or keep] def __init__(self, api_key, start=17760704,", "sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r) else", "Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod to apply time", ": str in {'M', 'A'. 
'Q', 'D', 'Y'} or bool (default is True)", "'Historical_FRED-MD.zip') # pre-2015 md_df, mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\"", "take log diff : int, default is 0 number of times to take", "by compounding for _ in range(t['log']): df = np.log(df) for _ in range(t['diff']):", "'RSAFS', 'OILPRICE': 'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div,", "\"\"\" tcode_ = {1: {'diff': 0, 'log': 0}, 2: {'diff': 1, 'log': 0},", "or self.api_key) r = requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series/observations\",", "vintage : int, default is None latest realtime_start date of observations to keep", "url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if args else", "n_obs_per_yr # log = Natural Log ln(x(t)) # Frequency # A = Annual", "x.T @ x = beta.T @ beta is covariance matrix \"\"\" if standardize:", "Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s =", "missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\"", "df = ((1 + df) ** t['annualize']) - 1 # by compounding for", "of factors based on ICp{p} criterion, or 0 if not determined Notes -----", "2)] sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r)", "if url.startswith('http'): url = io.BytesIO(requests.get(url).content) with zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0)", "['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in", "len(s)) mR2 = [0] + list(s**2 / (N * T)) # first case", "metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or self.end),", "no column can be all missing for col in np.flatnonzero(np.any(Y, 
axis=0)): # replace", "Returns ------- n : int length of observations dataframe \"\"\" if types.is_list_like(series_id): return", "str, default is None subset of header columns to return Returns ------- df", "series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or", "to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] =", "in self.cache_ and not self.get(series_id)): return None if freq is True: freq =", "date, optionally indexed by realtime_start Examples -------- \"\"\" df = observations.copy() df['value'] =", "operator periods : int, default is 1 number of periods to lag for", "\"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x),", "to base url or zipfile archive, or int date YYYYMM url : str,", "DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert fred to", "Else picks one of three methods in Bai & Ng (2002) to auto-determine", "as ET import matplotlib.pyplot as plt from pandas.api import types import time from", "name, from vintage Examples -------- md_df, mt = fredmd(csvfile='Historical FRED-MD Vintages Final/2013-12.csv', url=md_url", "df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') df['date'] = to_monthend(df.iloc[:, 0]) df =", "has metadata, e.g. 
transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label", "self.header_ = Alfred.header_.copy() self.echo_ = echo def _print(self, *args, echo=None): if echo or", "echo debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert", "api_key=api_key) if 'categories' not in c: return None c = c['categories'][0] c['children'] =", "array T observations/samples in rows, N variables/features in columns kmax : int, default", "var = (sum(mR2) - np.cumsum(mR2)) # variance of residuals after k components lnvar", "api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset += s['limit'] return c def", "(i.e. 'ICp2' criterion) If 0, number of factors is fixed as kmax. Else", "echo=config.ECHO): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD site Parameters", "df = df.diff(periods=t['periods']) df = df * t['annualize'] # by adding return df.shift(t['shift'])", "if len(r) else 0 # first min point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return", "and Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1))", "= df.diff(periods=t['periods']) df = df * t['annualize'] # by adding return df.shift(t['shift']) alfred_api", "end=end) for s in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if", "Wanted Index for United States'], ['AMDMNO', 'New Orders for Durable Goods'], ['S&P 500',", "[self.get(s, start=start, end=end) for s in series_id] series = self.series(series_id, api_key=api_key, start=start, end=end,", "spliced WTI and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA", "if series_id in ['S&P: indust']: s = Series() elif series_id in ['CLAIMS']: df", "every iteration Returns ------- x : 2D arrayint X with nan's 
replaced by", "'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True},", "= BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b", "def dump(self, savefile=None): \"\"\"Save all memory-cached series data to an output file\"\"\" with", "in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if", "vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series from alfred observations set", "<reponame>terence-lim/investment-data-science \"\"\"Convenience class and methods to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED,", "series to retrieve Returns ------- n : int length of observations dataframe \"\"\"", "s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'],", "series with FRED api Parameters ---------- series_id : str or list of str", "True: s = self.category(category_id, api=\"category/series\", api_key=api_key, offset=offset) if not s['seriess']: break c['series'].extend(s['seriess']) offset", "or self.end, freq=freq) if realtime: s = self.transform(df['value'], **kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end']", "indicates a data value transformation. 
# lin = Levels (No transformation) [default] #", "factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of factors if p>0", "= BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag", "vintage < 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages", "as pd from pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd", "Series value of each period date, optionally indexed by realtime_start Examples -------- \"\"\"", "= [tag.get('href').split('/')[-1] for tag in tags] return details #tags = soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details =", "int in {1, ..., 7}, default is 1 transformation code freq : str", "4: {'diff': 0, 'log': 1}, 5: {'diff': 1, 'log': 1}, 6: {'diff': 2,", "to take difference pct_change : bool whether to apply pct_change operator periods :", "# passed by reference Y = np.isnan(X) # identify missing entries assert(not np.any(np.all(Y,", "start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper to retrieve full observations of a series", "= df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id] s = multpl(v) elif", "@ x)**2, axis=0).reshape(1, - 1) return mR2 # units - stromg that indicates", "re import xml.etree.ElementTree as ET import matplotlib.pyplot as plt from pandas.api import types", "= f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage < 201500: url_ = url_", "series and apply transforms Parameters ---------- series_id : str or list of str", "e.g. 
transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label]", "Ng (2002) criterion Parameters ---------- x : 2D array T observations/samples in rows,", "ln(x(t-1))) * 100 # cca = Continuously Compounded Annual Rate of Change ((ln(x(t))", "c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset = 0", "{'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in ['S&P: indust']:", "(x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1)))", "'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad')", "series_id in self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq) if isinstance(v, str)", "keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list", "import str2date, to_monthend import config # From https://research.stlouisfed.org/econ/mccracken/fred-databases/ _fred_md_url = 'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args,", "series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True, **kwargs): \"\"\"Select from full observations", "/ np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1) return mR2 # units", ": bool whether to apply pct_change operator periods : int, default is 1", "= df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date df.index =", "with factor model and EM algorithm of <NAME> (1982), Stock & Watson (1998)", "for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial", "number of factors. 
If None, set to rank from SVD standardize : bool,", "of columns to display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df", "N variables/features in columns kmax : int, default is None Maximum number of", "df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id', drop=False) return df[columns or keep] def", "r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url,", "N variables/features in columns p : int in [1, 2, 3], default is", "is True) resample and replace date index with month ends at selected freqs", "pickle import zipfile import re import xml.etree.ElementTree as ET import matplotlib.pyplot as plt", "release up through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start']", "= self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end", "api_key, start=17760704, end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access and", "approximate factor model, EM algorithm Author: <NAME> License: MIT \"\"\" import os import", "+ release).dt.strftime('%Y-%m-%d') df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start']", "series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get observations and metadata", "df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if", "series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id, start=None, end=None, release=0, vintage=99991231, label=None, realtime=False, freq=True,", "if p>0 else fix number 
of factors r = BaiNg(X, p, kmax or", "df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or vintage", "0] # keep rows with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d')", "= url or _fred_md_url if isinstance(vintage, int) and vintage: vintage = f\"quarterly/{vintage //", "= np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods']) df = df.diff(periods=t['periods']) df", "+ args if args else '') r = requests_get(url, echo=echo) return dict() if", "int in [1, 2, 3], default is 2 use PCp1 or PCp2 or", "+ list(s**2 / (N * T)) # first case is when no factors", "self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key, args=\"&\" + args if args else '') r", "results 'u', 's', 'vT', 'kmax', 'converge', 'n_iter' \"\"\" X = X.copy() # passed", "{'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df =", "np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return min(r)", "with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None):", "shiller.keys(): v = shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys(): v =", "vintage = f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo)", "df.columns.str.rstrip('x') meta = dict() for _, row in df.iloc[:5].iterrows(): if '/' not in", "'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'): \"\"\"Retrieve a raw series", "of series to retrieve start, end : int, default is None start and", "series_id : str or list of str Labels of series to retrieve start,", "use PCp1 or PCp2 or PCp3 penalty kmax : int, default is None", 
"soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for tag in tags] return details #tags", "plt from pandas.api import types import time from .edgar import requests_get from .busday", "= Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) * 100", "of series to retrieve Returns ------- n : int length of observations dataframe", "# Seasonal Adjustment # SA = Seasonally Adjusted # NSA = Not Seasonally", "Rate of Change (ln(x(t)) - ln(x(t-1))) * 100 # cca = Continuously Compounded", "<NAME> License: MIT \"\"\" import os import sys import json import io import", "metadata and full observations of a series with FRED api Parameters ---------- series_id", "p : int in [1, 2, 3], default is 2 use PCp1 or", "/ NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1] ic = (lnvar", "category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to retrieve category data as dict\"\"\"", "'seasonal_adjustment_short', 'units_short'] # default list of columns to display for v in self.cache_.values():", "---------- observations: DataFrame from FRED 'series/observations' api call release : pd.DateOffset or int", "t['annualize'] # by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api =", "as f: pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return", "def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable from incrementally adding", "= (sum(mR2) - np.cumsum(mR2)) # variance of residuals after k components lnvar =", "if r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r =", "{1, ..., 7}, default is 1 transformation code freq : str in {'M',", "= str2date(df.iloc[:,0], '%b %d, %Y', '%Y%m%d') 
df['date'] = to_monthend(df.iloc[:, 0]) df = df.sort_values('Date').groupby('date').last().iloc[:,-1]", "debugging messages\"\"\" if echo: print(*args, **kwargs) def _int2date(date): \"\"\"helper method to convert int", "estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of factors if", "= to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred access,", "= url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/' + csvfile_ else:", "zipfile.ZipFile(url).open(vintage) as f: df = pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0)", "0, 'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0, 'log': 0,", "= pd.to_numeric(df['value'], errors='coerce') df = df.sort_values(by=['date', 'realtime_start']) if isinstance(release, int): # keep latest", "(X * std) + mean # undo standardization M['kmax'] = r M['converge'] =", "series = self.series(series_id, api_key=api_key, start=start, end=end, echo=self.echo_) if series is None or series.empty:", "= \"&\".join([f\"{k}={v}\" for k,v in kwargs.items()]) url = self.category_api(api=api, category_id=category_id, api_key=api_key or self.api_key,", "tcode : int in {1, ..., 7}, default is 1 transformation code freq", "date Notes ----- if vintage is int: then derive vintage csv file name", "echo=echo) url = url or url_ if url.endswith('.zip'): if url.startswith('http'): url = io.BytesIO(requests.get(url).content)", "df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for _,", "1 cch = Continuously Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) cca =", "pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y = s * u: T", "vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict() for _, row in df.iloc[:5].iterrows():", "default is None Maximum number of factors. 
If None, set to rank from", "in self.cache_: # load via api if not in cache self.get(series_id) self.header_[series_id] =", "# A = Annual # SA = Semiannual # Q = Quarterly #", "W = Weekly # D = Daily # Seasonal Adjustment # SA =", "self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series data to an", "number of factors is fixed as kmax. Else picks one of three methods", "with FRED api Parameters ---------- series_id : str or list of str ids", "pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date import requests from", "default is None set periodicity of dates log : int, default is 0", "Year Ago x(t) - x(t-n_obs_per_yr) pch = Percent Change ((x(t)/x(t-1)) - 1) *", "'log': 1}, 6: {'diff': 2, 'log': 1}, 7: {'diff': 1, 'log': 0, 'pct_change':", "in missing data with factor model and EM algorithm of <NAME> (1982), Stock", "# diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\") if M['converge'] < tol: break return", "< 201500: url_ = url_ + 'Historical_FRED-MD.zip' csvfile_ = 'Historical FRED-MD Vintages Final/'", "end=99991231, savefile=None, echo=config.ECHO): \"\"\"Create object, with api_key, for FRED access and data manipulation\"\"\"", "= (self(v, freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else:", "criterion, or 0 if not determined Notes ----- See Bai and Ng (2002)", "Returns ------- mR2 : 2D array each row corresponds to adding one factor", "to an output file\"\"\" with open(savefile or self.savefile, 'wb') as f: pickle.dump(self.cache_, f)", "\"\"\"API wrapper to retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v", "soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag in tags] #return details fred_adjust = {'HWI':", "def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of 
factors based on Bai", "post-2015 \"\"\" url_ = _fred_md_url if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage", "'MCOILWTICO', 'COMPAPFF': 'CPFF', 'CP3M': 'CPF3M', 'CLAIMS': 'ICNSA', # weekly 'HWIURATIO': [Series.div, 'JTSJOL', 'UNEMPLOY'],", "b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method to convert FRED api", "100 pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch =", "freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X,", "args else '') r = requests_get(url, echo=echo) return dict() if r is None", "else: s = self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id) def pcaEM(X, kmax=None,", "Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_", "FRED-MD site Parameters ---------- vintage : str or int, default 0 (for current.csv)", "\"\"\"Fill in missing data with factor model and EM algorithm of <NAME> (1982),", "column can be all missing for col in np.flatnonzero(np.any(Y, axis=0)): # replace with", "set periodicity of dates log : int, default is 0 number of times", "units - stromg that indicates a data value transformation. 
# lin = Levels", "= df * t['annualize'] # by adding return df.shift(t['shift']) alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\"", "'log': 0}, 'ch1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pch': {'diff':", "period dates (inclusive) to keep label : str, default is None New label", "# log = Natural Log ln(x(t)) # Frequency # A = Annual #", "M['converge'] < tol: break return X, M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO):", "Orders for Durable Goods'], ['S&P 500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL',", "from SVD standardize : bool, default is False if True, then standardize data", "self.end), api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is None: url", "local file path or zipfile archive Returns ------- df : DataFrame indexed by", "pct_change operator periods : int, default is 1 number of periods to lag", "all loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata", "replaced by PCA EM model : dict Model results 'u', 's', 'vT', 'kmax',", "Ratio\"], ['S&P: indust', \"S&P's Common Stock Price Index: Industrials\"]]} @classmethod def transform(self, data,", "series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True,", "X with nan's replaced by PCA EM model : dict Model results 'u',", "freq=freq) if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s =", "get(self, series_id, api_key=None, start=None, end=None): \"\"\"Retrieve metadata and full observations of a series", "echo=echo) if r is None: url = self.fred_api(api=\"series/observations\", series_id=series_id, api_key=api_key or self.api_key) r", "df * t['annualize'] # by adding return df.shift(t['shift']) alfred_api = 
(\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format", "# as dict of int codes df = df[df.iloc[:, 0].str.find('/') > 0] #", "def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo: print(*args, **kwargs)", "= np.nanmean(X[:, col]) M = dict() # latest fitted model parameters for M['n_iter']", "Common Stock Price Index: Industrials\"]]} @classmethod def transform(self, data, tcode=1, freq=None, **kwargs): \"\"\"Classmethod", "is None New label to rename returned series release : pd.DateOffset or int", "'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'],", "------- mR2 : 2D array each row corresponds to adding one factor component", "1 transformation code freq : str in {'M', 'Q', 'A'}, default is None", "\"\"\"Retrieve metadata and full observations of a series with FRED api Parameters ----------", "loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata row)", "if types.is_list_like(series_id): return [self.get(s, start=start, end=end) for s in series_id] series = self.series(series_id,", "to memory cache from saved file\"\"\" with open(savefile or self.savefile, 'rb') as f:", "in row[0]: # this row has metadata, e.g. 
transform codes label = re.sub(\"[^a-z]\",", "return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_),", "'pc1': {'diff': 0, 'log': 0, 'pct_change': True, 'periods': 12}, 'pca': {'diff': 1, 'log':", "None) @classmethod def as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select", "vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y = s *", "np.linalg.svd(x, full_matrices=False) # increase in R2 from adding kth (orthogonal) factor as a", "consumer credit to Personal Income'], ['S&P div yield', \"S&P's Composite Common Stock: Dividend", "len(s)) + 1)]) mR2 = mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1,", "17760704 end = 99991231 echo_ = config.ECHO api_key = None def header(self, series_id,", "PCp3 penalty kmax : int, default is None maximum number of factors. If", "requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1]", "url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0]", "is when no factors used var = (sum(mR2) - np.cumsum(mR2)) # variance of", "if isinstance(v, str) \\ else v[0](self(v[1], freq=freq), self(v[2], freq=freq))) else: s = self(series_id,", "'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df = df.pct_change(fill_method='pad') df", "df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize']) - 1", "True: freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or", "t['pct_change']: #df = 
df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 + df) **", "x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u, s,", "default is 2 use PCp1 or PCp2 or PCp3 penalty kmax : int,", "subset of header columns to return Returns ------- df : DataFrame headers of", "before processing (works better) Returns ------- mR2 : 2D array each row corresponds", "return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata row) of all loaded", "indust']: s = Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] =", "df.iloc[:5].iterrows(): if '/' not in row[0]: # this row has metadata, e.g. transform", "mt = fredmd(csvfile='monthly/2015-05.csv', url=md_url + 'FRED_MD.zip') # post-2015 \"\"\" url_ = _fred_md_url if", "is True: freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start", "\"\"\"Return headers (last metadata row) of all loaded series Parameters ---------- columns: list", "X = (X - mean) / std # standardize # \"M\" step: estimate", "to apply time series transformations Parameters ---------- data : DataFrame input data tcode", "https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) # y", "= Seasonally Adjusted Annual Rate # SSA = Smoothed Seasonally Adjusted # NA", "x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False) # increase in", "of times to take log diff : int, default is 0 number of", "df def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not", "echo=self.echo_) if series is None or series.empty: return 0 self.cache_[series_id] = { 'observations':", "# SA = 
Semiannual # Q = Quarterly # M = Monthly #", "_ in range(t['log']): df = np.log(df) for _ in range(t['diff']): #df = df.fillna(method='pad').diff(periods=t['periods'])", "with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return", "shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio': 'shiller-pe'} if series_id in", "base url or zipfile archive, or int date YYYYMM url : str, default", "df['date'] += QuarterEnd(1) if freq.upper()[0] in ['Q']: df['date'] += QuarterEnd(0) if freq.upper()[0] in", "header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta = dict()", "- mean) / std # standardize # \"M\" step: estimate factors M['u'], M['s'],", "T) penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2) / C2][p -", "= Daily # Seasonal Adjustment # SA = Seasonally Adjusted # NSA =", "f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] =", "tcode_ : dict transformation codes Notes ----- lin = Levels (No transformation) [default]", "c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset", "= DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] #", "f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def", "Parameters ---------- series_id : str or list of str ids of series to", ": dict transformation codes Notes ----- lin = Levels (No transformation) [default] chg", "PE ratio', \"S&P's Composite Common Stock: Price-Earnings Ratio\"], ['S&P: indust', \"S&P's Common Stock", "transformation codes Notes ----- lin = Levels (No 
transformation) [default] chg = Change", "cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"***", "Annual Rate # SSA = Smoothed Seasonally Adjusted # NA = Not Applicable", "chg = Change x(t) - x(t-1) # ch1 = Change from Year Ago", "url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r", "is None: return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now())", "df : DataFrame indexed by end-of-month date Notes ----- if csvfile is int:", "provided else series_id \"\"\" if (series_id not in self.cache_ and not self.get(series_id)): return", "= df[df['release'] + 1 == (release or 99999999)]\\ .append(df.drop_duplicates('date', keep='last'))\\ .drop_duplicates('date', keep='first') else:", "echo=echo) return dict() if r is None else json.loads(r.content) @classmethod def popular(self, page=1):", "url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a',", "= api_key self.start = start self.end = end self.savefile = savefile self.cache_ =", ": int length of observations dataframe \"\"\" if types.is_list_like(series_id): return [self.get(s, start=start, end=end)", "c['series'].extend(s['seriess']) offset += s['limit'] return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs):", "df = DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short']", "r = requests_get(url, echo=echo) return dict() if r is None else json.loads(r.content) @classmethod", "return df def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories'", "- 
ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r) else 0 # first", "url is None: then derive subfolder name from vintage \"\"\" url = url", "int number of difference, log and pct_change operations to apply freq : str", "None Latest realtime_start date (inclusive) allowed Returns ------- out: Series value of each", "codes df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with valid date", "point def marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable from incrementally", "= self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key) r = requests_get(url, echo=echo) if r is", "else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns =", "or series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end,", "freq = self.header(series_id, 'frequency_short') df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start,", "series_id, column='title'): \"\"\"Returns a column from last meta record of a series\"\"\" if", "then standardize data before processing (works better) Returns ------- r : int best", "column='title'): \"\"\"Returns a column from last meta record of a series\"\"\" if series_id", "bool, default is False if True, then standardize data before processing (works better)", "Parameters ---------- columns: list of str, default is None subset of header columns", "from pandas.api import types import time from .edgar import requests_get from .busday import", "= pd.read_csv(f, header=0) else: df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x') meta", "a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start), end=_int2date(end or", "\"\"\"Returns a column from last meta 
record of a series\"\"\" if series_id not", "return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert fred", "is fixed as kmax. Else picks one of three methods in Bai &", "variable from incrementally adding factors Parameters ---------- x : 2D array T observations/samples", "times to take log diff : int, default is 0 number of times", "errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq: if freq.upper()[0] in ['A']:", "factors r = BaiNg(X, p, kmax or len(M['s'])-1) if p else kmax or", "mR2 # units - stromg that indicates a data value transformation. # lin", "M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors based on", "k, 'title': v} for k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury", "def multpl(page): \"\"\"Helper method to retrieve shiller series by parsing multpl.com web page\"\"\"", "Compounded Rate of Change (ln(x(t)) - ln(x(t-1))) * 100 # cca = Continuously", "date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to retrieve", "cached series and observations tcode_ : dict transformation codes Notes ----- lin =", "------- Series or DataFrame transformed values, name set to label if provided else", "select a series from alfred observations set Parameters ---------- observations: DataFrame from FRED", "echo=echo) if r is None: return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations'])", "csv file name from input date YYYYMM if url is None: then derive", "get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key) if 'categories' not in c:", "= str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class", "tag in tags] #return details fred_adjust = {'HWI': 'JTSJOL', 
'AMDMNO': 'DGORDER', 'S&P 500':", "axis=0)): # replace with column means X[Y[:, col], col] = np.nanmean(X[:, col]) M", "s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or series_id) def __getitem__(self, series_id): \"\"\"Get", "str, default is None base name of url, local file path or zipfile", "csvfile_ vintage = csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url", "= ['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of", "= to_monthend(df.index) s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id] s", "to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred access, and", "convert fred to alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f,", "data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last metadata row) of all", "is None else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape popular", "x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x", "to display for v in self.cache_.values(): df = df.append(v['series'].iloc[-1], ignore_index=True) df = df.set_index('id',", "or PCp2 or PCp3 penalty kmax : int, default is None maximum number", "series to retrieve start, end : int, default is None start and end", "series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations']) def __call__(self, series_id,", "r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo: print(f\"{M['n_iter']:4d} {M['converge']:8.3g} {r}\")", "apply time 
series transformations Parameters ---------- data : DataFrame input data tcode :", "the column Notes ----- See <NAME> Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:]", "'monthly/' + csvfile_ vintage = csvfile_ else: vintage = vintage or 'monthly/current.csv' _print(vintage,", "= np.flatnonzero(sign>0) return min(r) if len(r) else 0 # first min point def", "a series with FRED api Parameters ---------- series_id : str or list of", "api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata as dataframe\"\"\" url", "if series_id not in self.header_: try: if series_id not in self.cache_: # load", "C2 = min(N, T) penalty = [np.log(NT2) / NT2, np.log(C2) / NT2, np.log(C2)", "@ x)**2, axis=0) for k in (np.arange(kmax or len(s)) + 1)]) mR2 =", "# Q = Quarterly # M = Monthly # BW = Biweekly #", "'https://files.stlouisfed.org/files/htdocs/fred-md/' def _print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo: print(*args,", "factor as a regressor mR2 = np.vstack([np.mean((u[:,k-1:k] @ u[:,k-1:k].T @ x)**2, axis=0) for", "int, default is 1 number of periods to lag for pct_change or diff", "pc1 = Percent Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 #", "wrapper to retrieve category data as dict\"\"\" args = \"&\".join([f\"{k}={v}\" for k,v in", "in {1, ..., 7}, default is 1 transformation code freq : str in", "'CPF3MTB3M': [Series.sub, 'CPF3M', 'DTB3'], 'CONSPI': [Series.div, 'NONREVSL', 'PI']} def adjusted_series(self, series_id, start=19590101, freq='M'):", "on ICp{p} criterion, or 0 if not determined Notes ----- See Bai and", "transformation code freq : str in {'M', 'Q', 'A'}, default is None set", "elif series_id in shiller.keys(): v = shiller[series_id] s = multpl(v) elif series_id in", "step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of factors", "is None maximum number of factors. 
If None, set to rank from SVD", "_print(*args, echo=config.ECHO, **kwargs): \"\"\"helper to echo debugging messages\"\"\" if echo: print(*args, **kwargs) def", "full_matrices=False) # increase in R2 from adding kth (orthogonal) factor as a regressor", ": int, default is 0 number of rows to shift output (negative to", "return Returns ------- df : DataFrame headers of all series loaded \"\"\" df", "__getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod def", "as_series(self, observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series from", "'log': 1}} header_ = { k : {'id': k, 'title': v} for k,v", "requests_get(url, echo=echo) if r is None: url = self.fred_api(api=\"series\", series_id=series_id, api_key=api_key or self.api_key)", "self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = [] offset = 0 while True: s", "s = Series() elif series_id in ['CLAIMS']: df = DataFrame(self('ICNSA')) df['Date'] = to_monthend(df.index)", "series\"\"\" if series_id not in self.header_: try: if series_id not in self.cache_: #", "whether to apply pct_change operator periods : int, default is 1 number of", "None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories', []) c['series'] = []", "df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return self.transform(df['value'], **kwargs).rename(label or", "(lnvar + np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0)", "{ 'observations': self.series_observations( series_id, api_key=api_key, start=start, end=end, alfred_mode=True, echo=self.echo_), 'series': series} return len(self.cache_[series_id]['observations'])", "int) and vintage: csvfile_ = 
f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage <", "header_ = { k : {'id': k, 'title': v} for k,v in [['CPF3MTB3M',", "freq : str in {'M', 'Q', 'A'}, default is None set periodicity of", "id names of all loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return", "of all loaded series data\"\"\" return list(self.cache_.keys()) def values(self, columns=None): \"\"\"Return headers (last", "---------- columns: list of str, default is None subset of header columns to", "BeautifulSoup(requests.get(url).content, 'html.parser') tables = soup.findChildren('table') df = pd.read_html(tables[0].decode())[0] df.iloc[:,0] = str2date(df.iloc[:,0], '%b %d,", "series_id : str or list of str ids of series to retrieve Returns", "12}, 'pca': {'diff': 1, 'log': 1, 'annualize': 12}, 'cch': {'diff': 1, 'log': 1},", "1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current or vintage csv", "if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] +=", "else json.loads(r.content) @classmethod def popular(self, page=1): \"\"\"Classmethod to web scrape popular series names,", "s = multpl(v) elif series_id in self.fred_adjust.keys(): v = adjust[series_id] s = (self(v,", "Parameters ---------- data : DataFrame input data tcode : int in {1, ...,", "no row can be all missing assert(not np.any(np.all(Y, axis=0))) # no column can", "of factors based on Bai & Ng (2002) criterion Parameters ---------- x :", "and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'], ['COMPAPFF', \"3-Month", "vintage=vintage, start=start or self.start, end=end or self.end, freq=freq) if realtime: s = self.transform(df['value'],", "s['limit'] return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper to", "# no column can be all missing for col in np.flatnonzero(np.any(Y, axis=0)): #", 
"shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys(): v = adjust[series_id] s =", "\"\"\" url_ = _fred_md_url if isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage //", "to retrieve shiller series by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup", "in self.header_: try: if series_id not in self.cache_: # load via api if", "periods to lag for pct_change or diff operator annualize : int. default is", "df = df.set_index('date').sort_index().drop(columns=['release']) return df[(df.index <= min(end, vintage)) & (df.index >= start)] def", "three methods in Bai & Ng (2002) to auto-determine number in every iteration", "rows, N variables/features in columns p : int in [1, 2, 3], default", "(works better) Returns ------- r : int best number of factors based on", "f: self.cache_.update(**pickle.load(f)) return len(self.cache_) def dump(self, savefile=None): \"\"\"Save all memory-cached series data to", "cca = Continuously Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr", "set to rank from SVD minus 1 p : int in [0, 1,", "row[1:].astype(int).to_dict() # as dict of int codes df = df[df.iloc[:, 0].str.find('/') > 0]", "Returns ------- df : DataFrame indexed by end-of-month date Notes ----- if csvfile", "['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13) if freq.upper()[0]", "is None Latest realtime_start date (inclusive) allowed Returns ------- out: Series value of", "0, 'log': 1}, 5: {'diff': 1, 'log': 1}, 6: {'diff': 2, 'log': 1},", "of Change (ln(x(t)) - ln(x(t-1))) * 100 # cca = Continuously Compounded Annual", "def load(self, savefile=None): \"\"\"Load series data to memory cache from saved file\"\"\" with", "or bool (default is True) resample and replace date index with month ends", "1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0}, 'log': {'diff': 0, 'log': 1}}", "to retrieve start, end : int, default is None start and 
end period", "['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if freq.upper()[0]", "in Bai & Ng (2002) to auto-determine number in every iteration Returns -------", "to keep label : str, default is None New label to rename returned", "pca = Compounded Annual Rate of Change (((x(t)/x(t-1)) ** (n_obs_per_yr)) - 1) *", "# variance of residuals after k components lnvar = np.log(np.where(var > 0, var,", "vintages - PCA, approximate factor model, EM algorithm Author: <NAME> License: MIT \"\"\"", "YYYYMM url : str, default is None base name of url, local file", "self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or self.end, freq=freq) if realtime:", "s * u: T x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_ #", "len(M['s'])-1 # \"E\" step: update missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r])", "str, default is None New label to rename returned series release : pd.DateOffset", "update missing entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] #", "Income'], ['S&P div yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE ratio',", "import datetime, date import requests from bs4 import BeautifulSoup from io import StringIO", "df = df[df['realtime_start'] <= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start'])", "fix number of factors r = BaiNg(X, p, kmax or len(M['s'])-1) if p", "to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and parse current", "input date YYYYMM if url is None: then derive subfolder or zip archive", "str or int, default 0 (i.e. 
current.csv) file name relative to base url", "(ln(x(t)) - ln(x(t-1))) cca = Continuously Compounded Annual Rate of Change (ln(x(t)) -", "= ((1 + df) ** t['annualize']) - 1 # by compounding for _", "McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape", "col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means X[Y[:, col], col] =", "if url is None: then derive subfolder or zip archive name, from vintage", "pca.transform(x) # y = s * u: T x n \"projection\" beta =", "realtime=False, freq=True, **kwargs): \"\"\"Select from full observations of a series and apply transforms", "int, default is 0 number of times to take difference pct_change : bool", "realtime_start date (inclusive) allowed Returns ------- out: Series value of each period date,", "of int codes df = df[df.iloc[:, 0].str.find('/') > 0] # keep rows with", "**kwargs).to_frame() s['realtime_start'] = df['realtime_start'].values s['realtime_end'] = df['realtime_end'].values return s.rename(columns={'value': label or series_id}) return", "Compounded Annual Rate of Change (ln(x(t)) - ln(x(t-1))) * n_obs_per_yr log = Natural", "set Parameters ---------- observations: DataFrame from FRED 'series/observations' api call release : pd.DateOffset", "X[Y] = y[Y] X = (X * std) + mean # undo standardization", "transform codes label = re.sub(\"[^a-z]\", '', row[0].lower()) # simplify label str meta[label] =", "f\"quarterly/{vintage // 100}-{vintage % 100:02d}.csv\" else: vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df =", "def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None) def get(self, series_id, api_key=None,", "else latest release up through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d') df", "start and end period dates (inclusive) to keep label : str, default is", "def 
__getitem__(self, series_id): \"\"\"Get observations and metadata for {series_id}\"\"\" return self.cache_.get(series_id, None) @classmethod", "T)) # first case is when no factors used var = (sum(mR2) -", "series_id in shiller.keys(): v = shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys():", "indexed by realtime_start Examples -------- \"\"\" df = observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce')", "min(len(s), kmax or len(s)) mR2 = [0] + list(s**2 / (N * T))", "['S&P div yield', \"S&P's Composite Common Stock: Dividend Yield\"], ['S&P PE ratio', \"S&P's", "all missing for col in np.flatnonzero(np.any(Y, axis=0)): # replace with column means X[Y[:,", "(1982), Stock & Watson (1998) and Bai & Ng (2002) Parameters ---------- X", "observations of a series as dataframe\"\"\" url = self.alfred_api(api=\"series/observations\", series_id=series_id, start=_int2date(start or self.start),", "df.loc[f, 'date'] return df def get_category(self, category_id, api_key=None): c = self.category(category_id, api=\"category\", api_key=api_key)", "\"\"\" if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False)", "DataFrame(meta) class Alfred: \"\"\"Base class for Alfred/Fred access, and manipulating retrieved data series", "observations tcode_ : dict transformation codes Notes ----- lin = Levels (No transformation)", "dict cached series and observations tcode_ : dict transformation codes Notes ----- lin", "is None or series.empty: return 0 self.cache_[series_id] = { 'observations': self.series_observations( series_id, api_key=api_key,", "vT = np.linalg.svd(x, full_matrices=False) kmax = min(len(s), kmax or len(s)) mR2 = [0]", "['S&P 500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food Services", "- 1) * 100 # pca = Compounded Annual Rate of Change (((x(t)/x(t-1))", "multpl(v) elif series_id in 
self.fred_adjust.keys(): v = adjust[series_id] s = (self(v, freq=freq) if", "are the incremental R2 for the variable in the column Notes ----- See", "df[(df.index <= min(end, vintage)) & (df.index >= start)] def series(self, series_id, api_key=None, start=None,", "None New label to rename returned series release : pd.DateOffset or int (default", "standardization M['kmax'] = r M['converge'] = np.sum((X - old)**2)/np.sum(X**2) # diff**2/prev**2 if echo:", "in {'M', 'A'. 'Q', 'D', 'Y'} or bool (default is True) resample and", "Price Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced", "df) ** t['annualize']) - 1 # by compounding for _ in range(t['log']): df", "alfred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&realtime_start={start}&realtime_end={end}\" \"&api_key={api_key}&file_type=json\").format fred_api = (\"https://api.stlouisfed.org/fred/{api}?series_id={series_id}\" \"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\"", "base name of url, local file path or zipfile archive Returns ------- df", "1}, 'cca': {'diff': 1, 'log': 1, 'annualize': 12}, 'lin': {'diff': 0, 'log': 0},", "in {'M', 'Q', 'A'}, default is None set periodicity of dates log :", "pd from pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd, QuarterEnd from", "* n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff': 0,", "df = self.as_series( self[series_id]['observations'], release=release, vintage=vintage, start=start or self.start, end=end or self.end, freq=freq)", "observations, release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series from alfred", "freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date'] += pd.DateOffset(days=13)", "tag in tags] return details #tags = 
soup.findAll(name='input',attrs={'class':'pager-item-checkbox'}) #details = [tag.get('value') for tag", "json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: # convert fred to alfred by backfilling", "\"\"\"Helper method to retrieve shiller series by parsing multpl.com web page\"\"\" url =", "Change from Year Ago ((x(t)/x(t-n_obs_per_yr)) - 1) * 100 pca = Compounded Annual", "axis=1))) # no row can be all missing assert(not np.any(np.all(Y, axis=0))) # no", "[0] + list(s**2 / (N * T)) # first case is when no", "via api if not in cache self.get(series_id) self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"***", "'', row[0].lower()) # simplify label str meta[label] = row[1:].astype(int).to_dict() # as dict of", "is None: return DataFrame() contents = json.loads(r.content) df = DataFrame(contents['observations']) if alfred_mode: #", "is None Maximum number of factors. If None, set to rank from SVD", "observations.copy() df['value'] = pd.to_numeric(observations['value'], errors='coerce') df['date'] = pd.to_datetime(df['date']) df = df.dropna().reset_index(drop=True) if freq:", "'lin': {'diff': 0, 'log': 0}, 'chg': {'diff': 1, 'log': 0}, 'ch1': {'diff': 0,", "pd.DateOffset(days=13) if freq.upper()[0] in ['W']: df['date'] += pd.DateOffset(days=6) if np.any(df['realtime_start'] <= _int2date(vintage)): df", "Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously Compounded Rate of", "soup = BeautifulSoup(data, 'lxml') tags = soup.findAll(name='a', attrs={'class': 'series-title'}) details = [tag.get('href').split('/')[-1] for", "s = df.groupby('Date').mean().iloc[:,0] elif series_id in shiller.keys(): v = shiller[series_id] s = multpl(v)", "echo=ECHO): \"\"\"API wrapper to retrieve series metadata as dataframe\"\"\" url = self.alfred_api(api=\"series\", series_id=series_id,", "self.header_[series_id] = self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return 
self.header_[series_id].get(column, f\"*** {series_id} ***\")", ": int in {1, ..., 7}, default is 1 transformation code freq :", "= df.pct_change(fill_method=None) df = ((1 + df) ** t['annualize']) - 1 # by", "savefile self.cache_ = dict() self.header_ = Alfred.header_.copy() self.echo_ = echo def _print(self, *args,", "= Natural Log ln(x(t)) # Frequency # A = Annual # SA =", "str Labels of series to retrieve start, end : int, default is None", "None latest realtime_start date of observations to keep diff, log, pct_change : int", "from last meta record of a series\"\"\" if series_id not in self.header_: try:", "month ends at selected freqs Returns ------- Series or DataFrame transformed values, name", "['RETAIL', \"Retail and Food Services Sales\"], ['OILPRICE', 'Crude Oil, spliced WTI and Cushing'],", "in (np.arange(kmax or len(s)) + 1)]) mR2 = mR2 / np.mean((u @ u.T", "vintage : str or int, default 0 (for current.csv) file name relative to", "to max release df['release'] = df.groupby('date').cumcount() df = df[df['release'] + 1 == (release", "series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series metadata as", "savefile=None): \"\"\"Save all memory-cached series data to an output file\"\"\" with open(savefile or", "return DataFrame() v = json.loads(r.content) df = DataFrame(v['seriess']) df.index.name = str(datetime.now()) return df", "marginalR2(x, kmax=None, standardize=False): \"\"\"Return marginal R2 of each variable from incrementally adding factors", "pandas as pd from pandas import DataFrame, Series from pandas.tseries.offsets import MonthEnd, YearEnd,", "2 use PCp1 or PCp2 or PCp3 penalty kmax : int, default is", "in date] if types.is_list_like(date) else int(re.sub('\\D', '', str(date)[:10]))) def multpl(page): \"\"\"Helper method to", "\"\"\"Convenience class and methods to access ALFRED/FRED apis and FRED-MD/FRED-QD - FRED, ALFRED,", "# pch = Percent Change ((x(t)/x(t-1)) - 1) * 100 # pc1 =", "100) 
* n_obs_per_yr # log = Natural Log ln(x(t)) # Frequency # A", "or int (default is 0) maximum release number or date offset (inclusive). If", "url is None: then derive subfolder or zip archive name, from vintage Examples", "np import pandas as pd from pandas import DataFrame, Series from pandas.tseries.offsets import", "start)] def series(self, series_id, api_key=None, start=None, end=None, echo=ECHO): \"\"\"API wrapper to retrieve series", "diff operator annualize : int. default is 1 annualization factor shift : int,", "pca = Compounded Annual Rate of Change (x(t)/x(t-1))**n_obs_per_yr - 1 cch = Continuously", "df.index = to_monthend(df.index) return df.iloc[:, 1:], DataFrame(meta) def fred_qd(vintage=0, url=None, echo=False): \"\"\"Retrieve and", "1 # by compounding for _ in range(t['log']): df = np.log(df) for _", "vintage = vintage or 'monthly/current.csv' _print(vintage, echo=echo) url = url or url_ if", "QuarterEnd(0) if freq.upper()[0] in ['M']: df['date'] += MonthEnd(0) if freq.upper()[0] in ['B']: df['date']", "X, M def BaiNg(x, p=2, kmax=None, standardize=False, echo=ECHO): \"\"\"Determine number of factors based", "\"&api_key={api_key}&file_type=json\").format category_api = (\"https://api.stlouisfed.org/fred/{api}?\" \"category_id={category_id}&api_key={api_key}&\" \"file_type=json{args}\").format start = 17760704 end = 99991231 echo_", "if t['pct_change']: #df = df.pct_change(fill_method='pad') df = df.pct_change(fill_method=None) df = ((1 + df)", "str ids of series to retrieve Returns ------- n : int length of", "n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor model and EM algorithm of", "release=0, vintage=99991231, start=0, end=99991231, freq=None): \"\"\"Classmethod to select a series from alfred observations", "on Bai & Ng (2002) criterion Parameters ---------- x : 2D array T", "current or vintage csv from McCracken FRED-MD site Parameters ---------- vintage : str", "(i.e. 
current.csv) file name relative to base url or zipfile archive, or int", "\"\"\" url = url or _fred_md_url if isinstance(vintage, int) and vintage: vintage =", "1 annualization factor shift : int, default is 0 number of rows to", "= {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df = data.sort_index() if t['pct_change']: #df", "if True, then standardize data before processing (works better) Returns ------- r :", "NT2, np.log(C2) / NT2, np.log(C2) / C2][p - 1] ic = (lnvar +", "if standardize: x = (x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1, -1) u, s, vT = np.linalg.svd(x, full_matrices=False) #", "Notes ----- if vintage is int: then derive vintage csv file name from", "mR2 / np.mean((u @ u.T @ x)**2, axis=0).reshape(1, - 1) return mR2 #", "vintage = 'quarterly/current.csv' _print(vintage, echo=echo) df = pd.read_csv(os.path.join(url, vintage), header=0) df.columns = df.columns.str.rstrip('x')", "= s * u: T x n \"projection\" beta = np.diag(pca.singular_values_) @ pca.components_", "'log': 0}, 4: {'diff': 0, 'log': 1}, 5: {'diff': 1, 'log': 1}, 6:", "X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X - mean)", "ic[:-1]) r = np.flatnonzero(sign>0) return min(r) if len(r) else 0 # first min", "in rows, N variables/features in columns p : int in [1, 2, 3],", "kmax=None, p=2, tol=1e-12, n_iter=2000, echo=ECHO): \"\"\"Fill in missing data with factor model and", "pickle.dump(self.cache_, f) return len(self.cache_) def clear(self): self.cache_.clear() def pop(self, series_id): return self.cache_.pop(series_id, None)", "= X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X -", "# else latest release up through date offset df['release'] = (df['date'] + release).dt.strftime('%Y-%m-%d')", "r = requests_get(url, echo=echo) if r is None: return DataFrame() contents = json.loads(r.content)", "(series_id not in self.cache_ and not 
self.get(series_id)): return None if freq is True:", "0, 'pct_change': True, 'periods': 12}, 'pch': {'diff': 0, 'log': 0, 'pct_change': True}, 'pc1':", "500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food Services Sales\"],", "str(datetime.now()) return df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API wrapper", "import MonthEnd, YearEnd, QuarterEnd from datetime import datetime, date import requests from bs4", "r : int best number of factors based on ICp{p} criterion, or 0", "Treasury Bill'], ['CLAIMS', 'Initial Claims'], ['HWIURATIO', 'Ratio of Help Wanted/No. Unemployed'], ['HWI', 'Help", "M['s'], M['vT'] = np.linalg.svd(X) # auto-select number of factors if p>0 else fix", "url=None, echo=False): \"\"\"Retrieve and parse current or vintage csv from McCracken FRED-MD site", "to apply pct_change operator periods : int, default is 1 number of periods", "rows, N variables/features in columns kmax : int, default is None maximum number", "one of three methods in Bai & Ng (2002) to auto-determine number in", "alfred by backfilling realtime_start f = (df['realtime_start'].eq(contents['realtime_start']) & df['realtime_end'].eq(contents['realtime_end'])).values df.loc[f, 'realtime_start'] = df.loc[f,", "if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1)", "\"\"\"Retrieve a raw series to update FRED-MD dataset Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller", "= self[series_id]['series'].iloc[-1] except: return f\"*** {series_id} ***\" return self.header_[series_id].get(column, f\"*** {series_id} ***\") def", "or date offset (inclusive). If 0: latest vintage : int, default is None", "vintage : str or int, default 0 (i.e. 
current.csv) file name relative to", "as dict of int codes df = df[df.iloc[:, 0].str.find('/') > 0] # keep", "with api_key, for FRED access and data manipulation\"\"\" self.api_key = api_key self.start =", "<= df['release']]\\ .drop_duplicates('date', keep='last') df['date'] = df['date'].dt.strftime('%Y%m%d').astype(int) df['realtime_start'] = _date2int(df['realtime_start']) df['realtime_end'] = _date2int(df['realtime_end'])", "from full observations of a series and apply transforms Parameters ---------- series_id :", ": 2D array T observations/samples in rows, N variables/features in columns p :", "= 'monthly/' + csvfile_ vintage = csvfile_ else: vintage = vintage or 'monthly/current.csv'", "from saved file\"\"\" with open(savefile or self.savefile, 'rb') as f: self.cache_.update(**pickle.load(f)) return len(self.cache_)", "latest realtime_start date of observations to keep diff, log, pct_change : int number", "replace date index with month ends at selected freqs Returns ------- Series or", "def _int2date(date): \"\"\"helper method to convert int date to FRED api string format\"\"\"", "(negative to lag) \"\"\" t = {'periods':1, 'shift':0, 'pct_change':False, 'annualize':1} t.update(self.tcode_[tcode]) t.update(kwargs) df", "ln(x(t-1))) * n_obs_per_yr log = Natural Log ln(x(t)) \"\"\" tcode_ = {1: {'diff':", "transformations Parameters ---------- data : DataFrame input data tcode : int in {1,", "int: then derive vintage csv file name from input date YYYYMM if url", "and Cushing'], ['COMPAPFF', \"3-Month Commercial Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial", "+ np.arange(len(mR2))*penalty)[:(kmax + 2)] sign = np.sign(ic[1:] - ic[:-1]) r = np.flatnonzero(sign>0) return", "Goods'], ['S&P 500', \"S&P's Common Stock Price Index: Composite\"], ['RETAIL', \"Retail and Food", "multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser') tables = 
soup.findChildren('table')", "rows with valid date df.index = str2date(df.iloc[:, 0], '%m/%d/%Y', '%Y%m%d') df.index = to_monthend(df.index)", "import numpy as np import pandas as pd from pandas import DataFrame, Series", "if args else '') r = requests_get(url, echo=echo) return dict() if r is", "else \"-\".join(str(date)[a:b] for a, b in [[0,4], [4,6], [6,8]])) def _date2int(date): \"\"\"helper method", "# by compounding for _ in range(t['log']): df = np.log(df) for _ in", "Ng (2002) and McCracken at https://research.stlouisfed.org/econ/mccracken/fred-databases/ \"\"\" if standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T,", "= np.diag(pca.singular_values_) @ pca.components_ # \"loadings\" x.T @ x = beta.T @ beta", "in ['A']: df['date'] += YearEnd(0) if freq.upper()[0] in ['S']: df['date'] += QuarterEnd(1) if", "['id', 'observation_start', 'observation_end', 'frequency_short', 'title', 'popularity', 'seasonal_adjustment_short', 'units_short'] # default list of columns", "old = X.copy() mean, std = X.mean(axis=0).reshape(1, -1), X.std(axis=0).reshape(1, -1) X = (X", "at https://research.stlouisfed.org/econ/mccracken/fred-databases/ pca.components_[i,:] is vT[i, :] pca.explained_variance_ is s**2/(T-1) y = pca.transform(x) #", "apis and FRED-MD/FRED-QD - FRED, ALFRED, revisions vintages - PCA, approximate factor model,", "out: Series value of each period date, optionally indexed by realtime_start Examples --------", "series by parsing multpl.com web page\"\"\" url = f\"https://www.multpl.com/{page}/table/by-month\" soup = BeautifulSoup(requests.get(url).content, 'html.parser')", "date index with month ends at selected freqs Returns ------- Series or DataFrame", "freq=freq), self(v[2], freq=freq))) else: s = self(series_id, auto_request=True, freq=freq) return s[s.index >= start].rename(series_id)", "NT2 = (N * T)/(N + T) C2 = min(N, T) penalty =", "entries y = M['u'][:, :r] @ np.diag(M['s'][:r]) @ M['vT'][:r, :] # \"E\" step", 
"pca.components_ # \"loadings\" x.T @ x = beta.T @ beta is covariance matrix", "k,v in [['CPF3MTB3M', '3-Month Commercial Paper Minus 3-Month Treasury Bill'], ['CLAIMS', 'Initial Claims'],", "Paper Minus FEDFUNDS\"], ['CP3M', \"3-Month AA Financial Commercial Paper Rates\"], ['CONSPI', 'Nonrevolving consumer", "isinstance(vintage, int) and vintage: csvfile_ = f\"{vintage // 100}-{vintage % 100:02d}.csv\" if vintage", "np.log(np.where(var > 0, var, 1e-26)) NT2 = (N * T)/(N + T) C2", "and end period dates (inclusive) to keep label : str, default is None", "v = shiller[series_id] s = multpl(v) elif series_id in self.fred_adjust.keys(): v = adjust[series_id]", "series loaded \"\"\" df = DataFrame() keep = ['id', 'observation_start', 'observation_end', 'frequency_short', 'title',", "date offset (inclusive). If 0: latest vintage : int, default is None latest", "import json import io import numpy as np import pandas as pd from", "= None def header(self, series_id, column='title'): \"\"\"Returns a column from last meta record", "= str(datetime.now()) return df def series_observations(self, series_id, api_key=None, start=None, end=None, alfred_mode=False, echo=ECHO): \"\"\"API", "+= s['limit'] return c def category(self, category_id, api=\"category\", api_key=None, echo=ECHO, **kwargs): \"\"\"API wrapper", "by page number\"\"\" assert(page > 0) url = f\"https://fred.stlouisfed.org/tags/series?ob=pv&pageID={page}\" data = requests.get(url).content soup", "not in c: return None c = c['categories'][0] c['children'] = self.category(category_id, api=\"category/children\", api_key=api_key).get('categories',", "3], default is 2 (i.e. 
'ICp2' criterion) If 0, number of factors is", "# standardize # \"M\" step: estimate factors M['u'], M['s'], M['vT'] = np.linalg.svd(X) #", "standardize: x = ((x-x.mean(axis=0).reshape(1,-1))/x.std(axis=0,ddof=0).reshape(1,-1)) T, N = x.shape #mR2 = np.sum(marginalR2(x), axis=1) u,", "date of observations to keep diff, log, pct_change : int number of difference,", "Notes ----- http://www.econ.yale.edu/~shiller/data/ie_data.xls \"\"\" shiller = {'S&P div yield': 's-p-500-dividend-yield', 'S&P PE ratio':" ]
[ "return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text", "PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message def", "Leaf import menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt", "prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer,", "self.tags = [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity = kwargs[\"target_entity\"] return", "__init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs):", "def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message def prompt_player(self,", "def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf):", "menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags", "= [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity = kwargs[\"target_entity\"] return start_accept_reject_prompt(target_entity.game_state.value.menu_prompt_stack,", "import Leaf import menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message):", "super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity =", "self).__init__() 
self.tags = [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity = kwargs[\"target_entity\"]", "self.text = message def prompt_player(self, **kwargs): target_entity = kwargs[\"target_entity\"] return start_accept_reject_prompt(target_entity.game_state.value.menu_prompt_stack, target_entity.game_state.value, self.text)", "'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class", "= menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__()", "message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message):", "compositecore import Leaf import menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state,", "menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack,", "[\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity = kwargs[\"target_entity\"] return start_accept_reject_prompt(target_entity.game_state.value.menu_prompt_stack, target_entity.game_state.value,", "message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message def prompt_player(self, **kwargs): target_entity", "game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"]", "state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) 
game_state.start_prompt(state.UIState(prompt))", "prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text =", "= 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result", "__author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return", "import menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt =", "import state __author__ = 'co' def start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message)", "start_accept_reject_prompt(state_stack, game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def", "from compositecore import Leaf import menu import state __author__ = 'co' def start_accept_reject_prompt(state_stack,", "game_state, message): prompt = menu.AcceptRejectPrompt(state_stack, message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self,", "message) game_state.start_prompt(state.UIState(prompt)) return prompt.result class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags =", "class PromptPlayer(Leaf): def __init__(self, message): super(PromptPlayer, self).__init__() self.tags = [\"prompt_player\"] self.text = message" ]
[ "Stratum Poll Reach LastRx Last sample =============================================================================== #* GPS0 0 4 377 11", "\"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \"", "as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e) def test_get_ntpd_sources(): parser_result", "23 -923us[ -924us] +/- 43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/-", "NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\" 210 Number of sources =", "-2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0", "== \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \"", ".INIT. 16 u - 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\"", "a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms ^+ d.e.f 1 6", "2 6 377 23 -923us[ -924us] +/- 43ms ^+ d.e.f 1 6 377", "-923us[ -924us] +/- 43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms", "ntpd_qn = \"\"\" remote refid st t when poll reach delay offset jitter", "u 163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid", "-0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st t when poll reach", "import SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output", "test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert", "\"\"\" remote refid st t when poll reach delay offset jitter ============================================================================== 172.16.58.3", "insights.core.dr import 
SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap", "from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\"", "t when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225", "e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e) def test_get_ntpd_sources(): parser_result =", "def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\"", "== \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\"", "\"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\"", "jitter ============================================================================== 172.16.58.3 .INIT. 
16 u - 1024 0 0.000 0.000 0.000 \"\"\"", "ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\" 210 Number of", "st t when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u", "parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") ==", "with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e) def", "\"NTP service is down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\")", "assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def", "parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert", "u - 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read:", "leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st t when poll reach delay", "parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\")", "ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\"", "Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert", "leap_none, sync_ntp, 
15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st", "insights.tests import context_wrap chrony_output = \"\"\" 210 Number of sources = 3 MS", "0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip()", "+/- 134ns ^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms ^+", "parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\"", "sample =============================================================================== #* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns ^?", "\"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip()", "u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256", "Reach LastRx Last sample =============================================================================== #* GPS0 0 4 377 11 -479ns[ -621ns]", "test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert", "assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2", "6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2", "-479ns[ -621ns] +/- 134ns ^? a.b.c 2 6 377 23 -923us[ -924us] +/-", "+ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn =", "============================================================================== 172.16.58.3 .INIT. 
16 u - 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue", "st t when poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 16 u", "reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 16 u - 1024 0 0.000", "def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\"", "insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\" 210", "assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent)", "\"\"\".strip() ntpd_output = \"\"\" remote refid st t when poll reach delay offset", "= \"\"\" 210 Number of sources = 3 MS Name/IP address Stratum Poll", "= \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert", "status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid", "service is down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") ==", "\"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result =", "is down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\"", "377 23 -923us[ -924us] +/- 43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us]", "test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" 
parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap", "\"\"\" 210 Number of sources = 3 MS Name/IP address Stratum Poll Reach", "16 u - 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq:", "377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 =", "sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st t", "0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn", "chrony_output = \"\"\" 210 Number of sources = 3 MS Name/IP address Stratum", "10.225.208.100 2 u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u", "assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\")", "\"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\")", "\" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e)", "11 -479ns[ -621ns] +/- 134ns ^? a.b.c 2 6 377 23 -923us[ -924us]", "21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\"", "parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as", "=============================================================================== #* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns ^? 
a.b.c", "d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00", "assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e:", "1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused", "256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459", "ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15", "\"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg,", "parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result", "210 Number of sources = 3 MS Name/IP address Stratum Poll Reach LastRx", "when poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 
16 u - 1024", "NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as", "-924us] +/- 43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip()", "NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\" 210 Number of sources", "from insights.tests import context_wrap chrony_output = \"\"\" 210 Number of sources = 3", "MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== #* GPS0 0", "= \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events,", "pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e) def test_get_ntpd_sources():", "43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output =", "remote refid st t when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100", "= \"\"\" remote refid st t when poll reach delay offset jitter ==============================================================================", "assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote", "\"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp,", "event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st t when poll reach", "down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert", "== \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): 
parser_result", "parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 =", "0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st t when poll", "6 377 23 -923us[ -924us] +/- 43ms ^+ d.e.f 1 6 377 21", "read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\"", "NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with", "parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") ==", "delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149", "- 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection", "GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns ^? a.b.c 2 6", "0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05 \"\"\".strip()", "15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st t when", "0 4 377 11 -479ns[ -621ns] +/- 134ns ^? a.b.c 2 6 377", "address Stratum Poll Reach LastRx Last sample =============================================================================== #* GPS0 0 4 377", "172.16.58.3 .INIT. 
16 u - 1024 0 0.000 0.000 0.000 \"\"\" ntp_connection_issue =", "256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st t", "SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output =", "Number of sources = 3 MS Name/IP address Stratum Poll Reach LastRx Last", "Last sample =============================================================================== #* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns", "0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st t when poll reach delay", "assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is", "assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP service", "-621ns] +/- 134ns ^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms", "0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05", "*ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2", "\"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") ==", "parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with", "\"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result =", "\"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2))", "0.000 0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def", "of sources = 3 
MS Name/IP address Stratum Poll Reach LastRx Last sample", "2 u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163", "remote refid st t when poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT.", "== \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap", "import context_wrap chrony_output = \"\"\" 210 Number of sources = 3 MS Name/IP", "= NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\"", "NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service", "1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip()", "= NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent)", "= 3 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== #*", "377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377 0.459 -0.234", "parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue))", "sources = 3 MS Name/IP address Stratum Poll Reach LastRx Last sample ===============================================================================", "86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none,", "0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection 
refused \"\"\".strip() def test_get_chrony_sources(): parser_result", "assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output))", "pytest from insights.core.dr import SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests", "reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464", "assert \"NTP service is down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert", "refid st t when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2", "def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert", "#* GPS0 0 4 377 11 -479ns[ -621ns] +/- 134ns ^? a.b.c 2", "^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms ^+ d.e.f 1", "2 u 163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote", "t when poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 16 u -", "\"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\") == \"172.16.58.3\" assert", "\"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e)", "= NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") ==", "4 377 11 -479ns[ -621ns] +/- 134ns ^? 
a.b.c 2 6 377 23", "offset jitter ============================================================================== 172.16.58.3 .INIT. 16 u - 1024 0 0.000 0.000 0.000", "+/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\" leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4", "== \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e:", "offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019", "============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150", "parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\")", "events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output = \"\"\" remote refid st t when poll", "NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\"", "\"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP", "import pytest from insights.core.dr import SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from", "ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output", "/usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") ==", "\"+\" assert 
parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap ==", "\"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue))", "10.228.209.150 2 u 163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\"", "import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap chrony_output = \"\"\" 210 Number", "parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result = NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap ==", "context_wrap chrony_output = \"\"\" 210 Number of sources = 3 MS Name/IP address", "377 11 -479ns[ -621ns] +/- 134ns ^? a.b.c 2 6 377 23 -923us[", "leap=00 \"\"\".strip() ntpq_leap_output_2 = \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00", "in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\")", "LastRx Last sample =============================================================================== #* GPS0 0 4 377 11 -479ns[ -621ns] +/-", "\" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in", "refid st t when poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 
16", "== \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\" parser_result =", "\"\"\".strip() ntpd_qn = \"\"\" remote refid st t when poll reach delay offset", "== \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in", "Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== #* GPS0 0 4", "parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap():", "3 MS Name/IP address Stratum Poll Reach LastRx Last sample =============================================================================== #* GPS0", "= ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") ==", "== \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn))", "when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256", "poll reach delay offset jitter ============================================================================== 172.16.58.3 .INIT. 
16 u - 1024 0", "== \"172.16.58.3\" assert parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert", "+/- 43ms ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output", "0.000 0.000 \"\"\" ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources():", "377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st t when", "parser_result2.data[0].get(\"flag\") == \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP service is", "= \"\"\" assID=0 status=06f4 leap_none, sync_ntp, 15 events, event_peer/strat_chg, leap=00 \"\"\".strip() ntpd_output =", "jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi", "assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert parser_result.leap == \"00\"", "225 256 377 0.464 0.149 0.019 +ntp104.cm4.tbsi 10.228.209.150 2 u 163 256 377", "str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) assert parser_result.data[0].get(\"source\") == \"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") ==", "ntp_connection_issue = \"\"\" /usr/sbin/ntpq: read: Connection refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output))", "\"\"\" remote refid st t when poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi", "poll reach delay offset jitter ============================================================================== *ntp103.cm4.tbsi 10.225.208.100 2 u 225 256 377", "163 256 377 0.459 -0.234 0.05 \"\"\".strip() ntpd_qn = \"\"\" remote refid st", 
"\"ntp103.cm4.tbsi\" assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert", "^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms \"\"\".strip() ntpq_leap_output = \"\"\"", "= NtpqLeap(context_wrap(ntpq_leap_output_2)) assert parser_result.leap == \"00\" with pytest.raises(SkipComponent) as e: NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP", "assert parser_result.data[1].get(\"flag\") == \"+\" assert parser_result.data[1].get(\"source\") == \"ntp104.cm4.tbsi\" parser_result2 = NtpqPn(context_wrap(ntpd_qn)) assert parser_result2.data[0].get(\"source\")", "134ns ^? a.b.c 2 6 377 23 -923us[ -924us] +/- 43ms ^+ d.e.f", "parser_result.data[2].get(\"state\") == \"+\" assert parser_result.data[2].get(\"mode\") == \"^\" def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) assert", "NtpqLeap(context_wrap(ntp_connection_issue)) assert \"NTP service is down\" in str(e) def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output))", "Poll Reach LastRx Last sample =============================================================================== #* GPS0 0 4 377 11 -479ns[", "ntpd_output = \"\"\" remote refid st t when poll reach delay offset jitter", "delay offset jitter ============================================================================== 172.16.58.3 .INIT. 16 u - 1024 0 0.000 0.000", "refused \"\"\".strip() def test_get_chrony_sources(): parser_result = ChronycSources(context_wrap(chrony_output)) assert parser_result.data[1].get(\"source\") == \"a.b.c\" assert parser_result.data[2].get(\"state\")", "from insights.core.dr import SkipComponent from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import", "== \" \" with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert \"NTP service is down\"" ]
[ "import random as rd def verifier(C, y, g, p): e = rd.randint(1,100) t", "= rd.randint(1,100) t = yield e if (g**t == (y**e)*C): accept = 1", "e if (g**t == (y**e)*C): accept = 1 else: accept = 0 yield", "<gh_stars>0 import random as rd def verifier(C, y, g, p): e = rd.randint(1,100)", "e = rd.randint(1,100) t = yield e if (g**t == (y**e)*C): accept =", "rd.randint(1,100) t = yield e if (g**t == (y**e)*C): accept = 1 else:", "t = yield e if (g**t == (y**e)*C): accept = 1 else: accept", "def verifier(C, y, g, p): e = rd.randint(1,100) t = yield e if", "verifier(C, y, g, p): e = rd.randint(1,100) t = yield e if (g**t", "random as rd def verifier(C, y, g, p): e = rd.randint(1,100) t =", "= yield e if (g**t == (y**e)*C): accept = 1 else: accept =", "yield e if (g**t == (y**e)*C): accept = 1 else: accept = 0", "g, p): e = rd.randint(1,100) t = yield e if (g**t == (y**e)*C):", "if (g**t == (y**e)*C): accept = 1 else: accept = 0 yield accept", "p): e = rd.randint(1,100) t = yield e if (g**t == (y**e)*C): accept", "rd def verifier(C, y, g, p): e = rd.randint(1,100) t = yield e", "as rd def verifier(C, y, g, p): e = rd.randint(1,100) t = yield", "y, g, p): e = rd.randint(1,100) t = yield e if (g**t ==" ]
[ "Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Natural Language ::", ":: Python :: 3\", \"Programming Language :: Python :: 3.7\", \"Topic :: Software", "python from setuptools import setup, find_packages with open(\"README.rst\", \"r\") as f: long_description =", "find_packages with open(\"README.rst\", \"r\") as f: long_description = f.read() name = \"password_validator\" version", "Software Development :: Libraries\", \"Topic :: Software Development :: Libraries :: Python Modules\"],", "flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"},", "System :: OS Independent\", \"Programming Language :: Python\", \"Programming Language :: Python ::", "Developers\", \"License :: OSI Approved :: MIT License\", \"Natural Language :: English\", \"Operating", "Language :: Python\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python", "author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status ::", "Independent\", \"Programming Language :: Python\", \"Programming Language :: Python :: 3\", \"Programming Language", "Python\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.7\",", ":: Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir':", "author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status", 
"long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[", "\"Development Status :: 5 - Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI", ":: English\", \"Operating System :: OS Independent\", \"Programming Language :: Python\", \"Programming Language", "license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development", "English\", \"Operating System :: OS Independent\", \"Programming Language :: Python\", \"Programming Language ::", "f: long_description = f.read() name = \"password_validator\" version = \"1.0\" setup(name=name, version=version, description=\"Validates", "version=version, description=\"Validates password according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\",", "Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py', 'docs/source'),", "Language :: Python :: 3.7\", \"Topic :: Software Development\", \"Topic :: Software Development", "name = \"password_validator\" version = \"1.0\" setup(name=name, version=version, description=\"Validates password according to flexible", "setup, find_packages with open(\"README.rst\", \"r\") as f: long_description = f.read() name = \"password_validator\"", "'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py', 'docs/source'), 'build_dir': ('setup.py', 'docs/build')}}, )", "f.read() name = \"password_validator\" version = \"1.0\" 
setup(name=name, version=version, description=\"Validates password according to", "Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py',", "Libraries\", \"Topic :: Software Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx': {", "{ 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py', 'docs/source'), 'build_dir': ('setup.py', 'docs/build')}},", "'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py', 'docs/source'), 'build_dir': ('setup.py',", "Approved :: MIT License\", \"Natural Language :: English\", \"Operating System :: OS Independent\",", "intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password,", "Libraries :: Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version),", "Software Development\", \"Topic :: Software Development :: Libraries\", \"Topic :: Software Development ::", "keywords: \"password, validation, schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended Audience", "\"Programming Language :: Python :: 3.7\", \"Topic :: Software Development\", \"Topic :: Software", ":: Python :: 3.7\", \"Topic :: Software Development\", \"Topic :: Software Development ::", "and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords:", "3.7\", \"Topic :: Software 
Development\", \"Topic :: Software Development :: Libraries\", \"Topic ::", "password according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\",", "Status :: 5 - Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI Approved", "#!/usr/bin/env python from setuptools import setup, find_packages with open(\"README.rst\", \"r\") as f: long_description", "specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation,", ":: 3.7\", \"Topic :: Software Development\", \"Topic :: Software Development :: Libraries\", \"Topic", ":: Software Development\", \"Topic :: Software Development :: Libraries\", \"Topic :: Software Development", "Python :: 3\", \"Programming Language :: Python :: 3.7\", \"Topic :: Software Development\",", ":: Libraries\", \"Topic :: Software Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx':", "description=\"Validates password according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\",", "= \"password_validator\" version = \"1.0\" setup(name=name, version=version, description=\"Validates password according to flexible and", "OS Independent\", \"Programming Language :: Python\", \"Programming Language :: Python :: 3\", \"Programming", "\"r\") as f: long_description = f.read() name = \"password_validator\" version = \"1.0\" setup(name=name,", "\"password_validator\" version = \"1.0\" setup(name=name, version=version, 
description=\"Validates password according to flexible and intuitive", "\"password, validation, schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended Audience ::", ":: OSI Approved :: MIT License\", \"Natural Language :: English\", \"Operating System ::", ":: Python\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python ::", "version = \"1.0\" setup(name=name, version=version, description=\"Validates password according to flexible and intuitive specifications\",", "Language :: English\", \"Operating System :: OS Independent\", \"Programming Language :: Python\", \"Programming", ":: OS Independent\", \"Programming Language :: Python\", \"Programming Language :: Python :: 3\",", "\"Topic :: Software Development\", \"Topic :: Software Development :: Libraries\", \"Topic :: Software", "import setup, find_packages with open(\"README.rst\", \"r\") as f: long_description = f.read() name =", "\"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended", "\"Operating System :: OS Independent\", \"Programming Language :: Python\", \"Programming Language :: Python", "url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status :: 5", "\"Topic :: Software Development :: Libraries\", \"Topic :: Software Development :: Libraries ::", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.7\", \"Topic", "packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status :: 5 -", "\"Programming Language :: Python\", \"Programming Language :: Python :: 3\", \"Programming Language ::", ":: Software Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py',", "Software 
Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name),", "according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"),", "Development\", \"Topic :: Software Development :: Libraries\", \"Topic :: Software Development :: Libraries", "- Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT License\",", ":: 3\", \"Programming Language :: Python :: 3.7\", \"Topic :: Software Development\", \"Topic", "from setuptools import setup, find_packages with open(\"README.rst\", \"r\") as f: long_description = f.read()", "validation, schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended Audience :: Developers\",", ":: Developers\", \"License :: OSI Approved :: MIT License\", \"Natural Language :: English\",", "OSI Approved :: MIT License\", \"Natural Language :: English\", \"Operating System :: OS", "Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Natural", "long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\",", "package_dir={\"\": \"src\"}, keywords: \"password, validation, schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\",", "classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended Audience :: Developers\", \"License ::", "Development :: Libraries\", \"Topic :: Software Development :: Libraries :: Python Modules\"], command_options={", ":: Libraries :: Python Modules\"], 
command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py',", "\"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT License\", \"Natural Language", "long_description = f.read() name = \"password_validator\" version = \"1.0\" setup(name=name, version=version, description=\"Validates password", "= \"1.0\" setup(name=name, version=version, description=\"Validates password according to flexible and intuitive specifications\", long_description=long_description,", "with open(\"README.rst\", \"r\") as f: long_description = f.read() name = \"password_validator\" version =", "\"1.0\" setup(name=name, version=version, description=\"Validates password according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst',", "<filename>setup.py #!/usr/bin/env python from setuptools import setup, find_packages with open(\"README.rst\", \"r\") as f:", ":: 5 - Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI Approved ::", "\"Natural Language :: English\", \"Operating System :: OS Independent\", \"Programming Language :: Python\",", "to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\", author=\"<NAME>\", author_email=\"<EMAIL>\", url=\"https://github.com/tarunbatra/password-validator-python\", packages=find_packages(\"src\"), package_dir={\"\":", ":: MIT License\", \"Natural Language :: English\", \"Operating System :: OS Independent\", \"Programming", "\"License :: OSI Approved :: MIT License\", \"Natural Language :: English\", \"Operating System", "License\", \"Natural Language :: English\", \"Operating System :: OS Independent\", \"Programming Language ::", "Language :: Python :: 3\", \"Programming Language :: Python :: 3.7\", \"Topic ::", "\"Topic :: Software Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx': { 'project':", 
"setuptools import setup, find_packages with open(\"README.rst\", \"r\") as f: long_description = f.read() name", "= f.read() name = \"password_validator\" version = \"1.0\" setup(name=name, version=version, description=\"Validates password according", "setup(name=name, version=version, description=\"Validates password according to flexible and intuitive specifications\", long_description=long_description, long_description_content_type='text/x-rst', license=\"MIT\",", "as f: long_description = f.read() name = \"password_validator\" version = \"1.0\" setup(name=name, version=version,", "5 - Production/Stable\", \"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT", "schema\", classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Intended Audience :: Developers\", \"License", "MIT License\", \"Natural Language :: English\", \"Operating System :: OS Independent\", \"Programming Language", "3\", \"Programming Language :: Python :: 3.7\", \"Topic :: Software Development\", \"Topic ::", ":: Software Development :: Libraries\", \"Topic :: Software Development :: Libraries :: Python", "command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version': ('setup.py', version), 'source_dir': ('setup.py', 'docs/source'), 'build_dir':", "open(\"README.rst\", \"r\") as f: long_description = f.read() name = \"password_validator\" version = \"1.0\"", "Python :: 3.7\", \"Topic :: Software Development\", \"Topic :: Software Development :: Libraries\",", "Development :: Libraries :: Python Modules\"], command_options={ 'build_sphinx': { 'project': ('setup.py', name), 'version':" ]
[ "python -*- coding: utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any EXIF data", "class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return", "attached to the image. Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright:", "\"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__() if __name__ == \"__main__\":", "data attached to the image. Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>>", "import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5", "utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any EXIF data attached to the", "EXIFData from PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF", "PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import QtCore from", "-*- coding: utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any EXIF data attached", "QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image)", "EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None:", "QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import QtCore from PyQt5.QtCore import", "EXIF data attached to the image. 
Version: 1 - Initial release Author: J.MacGrillen", "from src.tools.exif_data import EXIFData from PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying", "PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data", "self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer", "import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data =", "EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__()", "/usr/bin/env python -*- coding: utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any EXIF", "EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout", "Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All", "Copyright: Copyright (c) <NAME>. All rights reserved. \"\"\" import logging from PyQt5.QtWidgets import", "<NAME>. All rights reserved. \"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from", "data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def", "= EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) ->", "Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved. \"\"\"", "(c) <NAME>. All rights reserved. \"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout", "image. Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>.", "release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved. \"\"\" import", "All rights reserved. 
\"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data", "Name: exif_view.py Desscription: Display any EXIF data attached to the image. Version: 1", "PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget):", "\"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__() if", "<<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved. \"\"\" import logging from PyQt5.QtWidgets", "logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import", "-*- \"\"\" Name: exif_view.py Desscription: Display any EXIF data attached to the image.", "Display any EXIF data attached to the image. Version: 1 - Initial release", "the image. Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c)", "\"\"\" Name: exif_view.py Desscription: Display any EXIF data attached to the image. Version:", "exif_view.py Desscription: Display any EXIF data attached to the image. Version: 1 -", "- Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved.", "rights reserved. \"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import", "J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved. \"\"\" import logging from", "to the image. Version: 1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright", "import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import QtCore from PyQt5.QtCore", "\"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from", "Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights reserved. 
\"\"\" import logging", "QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import QtCore from PyQt5.QtCore import Qt", "import EXIFData from PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract", "from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData from PyQt5 import QtCore", "from PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\")", "from PyQt5.QtCore import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class", "Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF", "any EXIF data attached to the image. Version: 1 - Initial release Author:", "to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\"", "extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout:", "viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__() if __name__ ==", "v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__() if __name__ == \"__main__\": pass", "#! /usr/bin/env python -*- coding: utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any", "src.tools.exif_data import EXIFData from PyQt5 import QtCore from PyQt5.QtCore import Qt self.logger.debug(\"Trying to", "1 - Initial release Author: J.MacGrillen <<EMAIL>> Copyright: Copyright (c) <NAME>. All rights", "Copyright (c) <NAME>. All rights reserved. \"\"\" import logging from PyQt5.QtWidgets import QDockWidget,", "Desscription: Display any EXIF data attached to the image. 
Version: 1 - Initial", "self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\" EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls)", "import Qt self.logger.debug(\"Trying to extract EXIF data...\") self.exif_data = EXIFData(self.pil_image) class EXIFView(QDockWidget): \"\"\"", "EXIF viewer \"\"\" v_layout: QVBoxLayout def __init_subclass__(cls) -> None: return super().__init_subclass__() if __name__", "coding: utf-8 -*- \"\"\" Name: exif_view.py Desscription: Display any EXIF data attached to", "reserved. \"\"\" import logging from PyQt5.QtWidgets import QDockWidget, QVBoxLayout from src.tools.exif_data import EXIFData" ]
[ "bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py', 'gspread',", "Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py', 'gspread', 'oauth2client', 'requests',", "<filename>setup.py from setuptools import setup, find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for", "setuptools import setup, find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams',", "name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(),", "description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py',", "packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py', 'gspread', 'oauth2client', 'requests', 'srcomapi'], )", "setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT',", "for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py', 'gspread', 'oauth2client',", "setup, find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', 
description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs',", "find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')),", "version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4',", "import setup, find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests',", "author='<NAME>', version='0.1dev', description='Discord bot for Azure Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0',", "Dreams', packages=find_packages(exclude=('tests', 'docs', 'data')), license='MIT', long_description=open('README.md').read(), python_requires='>=3.6.0', install_requires=['beautifulsoup4', 'discord.py', 'gspread', 'oauth2client', 'requests', 'srcomapi'],", "from setuptools import setup, find_packages setup( name='CurryBot', author='<NAME>', version='0.1dev', description='Discord bot for Azure" ]
[ "torchvision.transforms as T from .transforms import MultiSample, aug_transform from .base import BaseDataset def", "from .transforms import MultiSample, aug_transform from .base import BaseDataset def base_transform(): return T.Compose(", "from torchvision.datasets import CIFAR100 as C100 import torchvision.transforms as T from .transforms import", "ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t =", "0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg),", "C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True,", "from .base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675,", ".base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565,", "T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t =", "as C100 import torchvision.transforms as T from .transforms import MultiSample, aug_transform from .base", "import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]", ".transforms import MultiSample, aug_transform from .base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(),", "T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self):", "base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t =", "train=True, download=True, transform=t) def ds_test(self): t = base_transform() return C100(root=\"./data\", train=False, 
download=True, transform=t)", ") return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\",", "0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample(", ") class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples )", "transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self):", "[T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t", "class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return", "t = base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t = base_transform()", "train=True, download=True, transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True, transform=t)", "base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset):", "n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t = base_transform() return", "t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,)", "as T from .transforms import MultiSample, aug_transform from .base import BaseDataset def base_transform():", "return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def", "T from .transforms import MultiSample, aug_transform from .base import BaseDataset def base_transform(): return", "def 
base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] ) class", "return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True,", "CIFAR100 as C100 import torchvision.transforms as T from .transforms import MultiSample, aug_transform from", "= MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def", "MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self):", "self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t = base_transform()", "C100 import torchvision.transforms as T from .transforms import MultiSample, aug_transform from .base import", "MultiSample, aug_transform from .base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867,", "0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples", "def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True,", "torchvision.datasets import CIFAR100 as C100 import torchvision.transforms as T from .transforms import MultiSample,", "import torchvision.transforms as T from .transforms import MultiSample, aug_transform from .base import BaseDataset", "base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t = base_transform() return C100(root=\"./data\",", "download=True, transform=t,) def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True, 
transform=t) def", "(0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform,", "return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t = base_transform() return C100(root=\"./data\", train=False,", "0.4408), (0.2675, 0.2565, 0.2761))] ) class CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32,", "= base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t = base_transform() return", "BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))] )", "C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t = base_transform() return C100(root=\"./data\", train=False, download=True,", "def ds_clf(self): t = base_transform() return C100(root=\"./data\", train=True, download=True, transform=t) def ds_test(self): t", "ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True,", "aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\", train=True, download=True, transform=t,) def ds_clf(self): t", "import CIFAR100 as C100 import torchvision.transforms as T from .transforms import MultiSample, aug_transform", "import MultiSample, aug_transform from .base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071,", "CIFAR100(BaseDataset): def ds_train(self): t = MultiSample( aug_transform(32, base_transform, self.aug_cfg), n=self.aug_cfg.num_samples ) return C100(root=\"./data\",", "aug_transform from .base import BaseDataset def base_transform(): return T.Compose( [T.ToTensor(), T.Normalize((0.5071, 0.4867, 0.4408)," ]
[ "use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False if not use_bd: setattr(self, key,", "{1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all of the declarative", "self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can", "from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False", "kwargs_unmatched: raise ValueError( ( \"Can only redefine class-specified attributes, class {0} does not", "if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( (", "getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc, ( types.MethodType,", "**kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key, obj in list(kwargs.items()): try: parent_desc", "for key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key]", "list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue if", "= False if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj return", "obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if", ").format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all of the declarative arguments", ") #now run the __mid_init__ before all of the declarative arguments trigger self.__mid_init__()", "try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue if isinstance(", "= 
getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc, (", "\"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise", "continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise ValueError( (", "= kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can only redefine", "isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise ValueError( ( \"Can only", "): raise ValueError( ( \"Can only redefine non-method descriptors, {0} a method of", "except AttributeError: use_bd = False if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key]", "class {0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the", "setattr(self, key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\"", "use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs):", "key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] =", "\"Can only redefine class-specified attributes, class {0} does not have elements {1}\" ).format(self.__class__.__name__,", "_overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key, obj in list(kwargs.items()): try:", "of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd", "try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False if not use_bd: setattr(self,", "\"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): 
\"\"\" \"\"\"", "only redefine non-method descriptors, {0} a method of class {1}\" ).format(key, self.__class__.__name__) )", "attributes, class {0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run", "= obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise", "if kwargs_unmatched: raise ValueError( ( \"Can only redefine class-specified attributes, class {0} does", "__mid_init__ before all of the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \",", "_overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key,", "redefine class-specified attributes, class {0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) )", "declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return def __mid_init__(self): \"\"\"", "{1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False", "= {} for key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except", "# -*- coding: utf-8 -*- \"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes", "self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can only", "-*- \"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase", "not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self,", "types.MethodType, staticmethod, classmethod ) ): raise ValueError( ( \"Can only redefine non-method descriptors,", "raise ValueError( ( \"Can only redefine non-method descriptors, {0} a method of class", "**kwargs): 
\"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched:", "= obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs =", "False if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched", "parent_desc._force_boot_dict except AttributeError: use_bd = False if not use_bd: setattr(self, key, obj) else:", "use_bd = False if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj", "AttributeError: use_bd = False if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] =", ").format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False if", "\"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class", "obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj", ") ): raise ValueError( ( \"Can only redefine non-method descriptors, {0} a method", "kwargs_unmatched[key] = obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ):", "def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key, obj in list(kwargs.items()):", "self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can only redefine class-specified attributes, class {0}", "{0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__", "\"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError(", "all of the declarative arguments trigger 
self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return", "trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return def __mid_init__(self): \"\"\" \"\"\" return", "import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs =", "method of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError:", "obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise ValueError(", "redefine non-method descriptors, {0} a method of class {1}\" ).format(key, self.__class__.__name__) ) try:", "only redefine class-specified attributes, class {0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys()))", "{} for key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError:", "except AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod", "a method of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except", "if not use_bd: setattr(self, key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def", "list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all of the declarative arguments trigger", "class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd =", "types from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object):", "arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return def __mid_init__(self): \"\"\" \"\"\"", "elements {1}\" 
).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all of the", "{0} a method of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict", "self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs", "OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self,", "kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can only redefine class-specified attributes,", "( \"Can only redefine non-method descriptors, {0} a method of class {1}\" ).format(key,", "\"\"\" import types from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes,", "-*- coding: utf-8 -*- \"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes from", "SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None", "the __mid_init__ before all of the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO:", "of the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return def", "HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs =", "does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before", "kwargs_unmatched = {} for key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__, 
key)", "not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all", "import types from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase,", ".utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs", "before all of the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self)", "def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs)", "the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__() #print(\"OO: \", self) return def __mid_init__(self):", "_overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched =", ") try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False if not use_bd:", "parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise ValueError( ( \"Can only redefine", "\"Can only redefine non-method descriptors, {0} a method of class {1}\" ).format(key, self.__class__.__name__)", "raise ValueError( ( \"Can only redefine class-specified attributes, class {0} does not have", "= None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key, obj", "else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs:", "utf-8 -*- \"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes from .utilities.representations import", "obj return kwargs_unmatched def __init__(self, 
**kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs", "coding: utf-8 -*- \"\"\" \"\"\" import types from .properties import HasDeclaritiveAttributes from .utilities.representations", "= self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( ( \"Can only redefine class-specified attributes, class", "= parent_desc._force_boot_dict except AttributeError: use_bd = False if not use_bd: setattr(self, key, obj)", "key) except AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod,", "have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now run the __mid_init__ before all of", "non-method descriptors, {0} a method of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd", "False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for", "import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs", "\"\"\" \"\"\" kwargs_unmatched = {} for key, obj in list(kwargs.items()): try: parent_desc =", "= False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {}", "( \"Can only redefine class-specified attributes, class {0} does not have elements {1}\"", "key, obj) else: self.__boot_dict__[key] = obj return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\"", "return kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched", "ValueError( ( \"Can only redefine class-specified attributes, class {0} does not have elements", "kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if kwargs_unmatched: raise ValueError( 
( \"Can only redefine class-specified", "None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched = {} for key, obj in", "classmethod ) ): raise ValueError( ( \"Can only redefine non-method descriptors, {0} a", "\"\"\" kwargs_unmatched = {} for key, obj in list(kwargs.items()): try: parent_desc = getattr(self.__class__,", "#now run the __mid_init__ before all of the declarative arguments trigger self.__mid_init__() super(OverridableObject,", "kwargs_unmatched def __init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched =", "class-specified attributes, class {0} does not have elements {1}\" ).format(self.__class__.__name__, list(kwargs_unmatched.keys())) ) #now", "class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None def", "descriptors, {0} a method of class {1}\" ).format(key, self.__class__.__name__) ) try: use_bd =", "run the __mid_init__ before all of the declarative arguments trigger self.__mid_init__() super(OverridableObject, self).__init__()", "in list(kwargs.items()): try: parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue", "from .properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\"", "self.__class__.__name__) ) try: use_bd = parent_desc._force_boot_dict except AttributeError: use_bd = False if not", "ValueError( ( \"Can only redefine non-method descriptors, {0} a method of class {1}\"", "if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod ) ): raise ValueError( ( \"Can", "\"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\" \"\"\" kwargs_unmatched", "object): \"\"\" \"\"\" 
_overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs): \"\"\"", "SuperBase, object): \"\"\" \"\"\" _overridable_object_save_kwargs = False _overridable_object_kwargs = None def _overridable_object_inject(self, **kwargs):", "__init__(self, **kwargs): \"\"\" \"\"\" if self._overridable_object_save_kwargs: self._overridable_object_kwargs = kwargs kwargs_unmatched = self._overridable_object_inject(**kwargs) if", "parent_desc = getattr(self.__class__, key) except AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc,", ".properties import HasDeclaritiveAttributes from .utilities.representations import SuperBase class OverridableObject(HasDeclaritiveAttributes, SuperBase, object): \"\"\" \"\"\"", "staticmethod, classmethod ) ): raise ValueError( ( \"Can only redefine non-method descriptors, {0}", "( types.MethodType, staticmethod, classmethod ) ): raise ValueError( ( \"Can only redefine non-method", "AttributeError: kwargs_unmatched[key] = obj continue if isinstance( parent_desc, ( types.MethodType, staticmethod, classmethod )" ]
[ "setting = x[0] for i in range(x[1]): bitinppat[pnt] = setting % 2 pnt", "includes 2 calibration bits # (note: 1px = 1bit at this stage of", "np.zeros((ystop, xstop, 3), dtype='uint8') print(str(xstop)+\"x\"+str(ystop), end='', flush=True) for j in range(ystart, ystop): #", "nof_black_ln_aft # total lines in bit pattern, # includes 1 calibration line x_dim", "video with 2x2 bit pattern \"bit\" size as the lowest resolution def bp_create(bitpat_file_dir,", "(int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)] ibp = np.zeros((y_dim, x_dim), dtype='uint8') bitinppat =", "framerate, pix_per_ln, ln_per_frame): import cv2 import numpy as np from pathlib import Path", "= np.zeros((y_dim, x_dim), dtype='uint8') bitinppat = np.zeros((bits_per_ln*nof_data_ln,), dtype='uint8') pnt = 0 for x", "bitinppat = np.zeros((bits_per_ln*nof_data_ln,), dtype='uint8') pnt = 0 for x in settings: setting =", "dtype='uint8') bitinppat = np.zeros((bits_per_ln*nof_data_ln,), dtype='uint8') pnt = 0 for x in settings: setting", "= setting >> 1 ycur = nof_black_ln_bef xcur = nof_black_px_bef for i in", "pixels per data line for i in range(2): ibp[ycur+1+j, xcur + i] =", "# ystart, ystop, xstart, xstop for i in range(xstart, xstop): ycur = int(j*vinc)", "at this stage of the process) vact_ref = 135.0 # make float, as", "make float, as target result must be float hact_ref = 240.0 # vinc", "is followed by the number of bits used to encode it settings =", "2 # number_of_black_lines_before nof_black_ln_aft = 1 # number_of_black_lines_after nof_black_px_bef = 5 # number_of_black_pixels_before", "for i in range(xstart, xstop): ycur = int(j*vinc) xcur = int(i*hinc) i00[j, i,", "(note: 1px = 1bit at this stage of the process) vact_ref = 135.0", "bp_create(bitpat_file_dir, frame_number, nof_frames, framerate, pix_per_ln, ln_per_frame): import cv2 import numpy as np from", "Each setting is followed by the number of bits used to encode it", "1 calibration line x_dim = nof_black_px_bef + 2 + 
bits_per_ln + nof_black_px_aft #", "pix_per_ln, ln_per_frame): import cv2 import numpy as np from pathlib import Path bits_per_ln", "+ nof_data_ln + nof_black_ln_aft # total lines in bit pattern, # includes 1", "2x2 bit pattern \"bit\" size as the lowest resolution def bp_create(bitpat_file_dir, frame_number, nof_frames,", "= [(frame_number, 24), (nof_frames, 24), (int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)] ibp =", "calibration line x_dim = nof_black_px_bef + 2 + bits_per_ln + nof_black_px_aft # total", "1 setting = setting >> 1 ycur = nof_black_ln_bef xcur = nof_black_px_bef for", "calibration bits # (note: 1px = 1bit at this stage of the process)", "x_dim = nof_black_px_bef + 2 + bits_per_ln + nof_black_px_aft # total pixels per", "range(nof_data_ln): # calibration pixels per data line for i in range(2): ibp[ycur+1+j, xcur", "bit pattern, # includes 1 calibration line x_dim = nof_black_px_bef + 2 +", "the process) vact_ref = 135.0 # make float, as target result must be", "ibp[ycur+1+j, xcur+2+i] = bitinppat[j*bits_per_ln + i] xstart = 0 xstop = int(x_dim/hinc) ystart", "int(j*vinc) xcur = int(i*hinc) i00[j, i, :] = ibp[ycur, xcur]*255.0 status = cv2.imwrite(str(Path(str(bitpat_file_dir)+'\\\\'+str(frame_number).zfill(5)+'.png')),", "pnt = 0 for x in settings: setting = x[0] for i in", "+ nof_black_px_aft # total pixels per bit pattern line, # includes 2 calibration", "rate, horizontal and vertical resolution # Step 1: define bit pattern for 240x135", "= 1 # number_of_black_lines_after nof_black_px_bef = 5 # number_of_black_pixels_before nof_black_px_aft = 3 #", "| nof_frames | framerate | pix_per_ln | ln_per_frame # Each setting is followed", "numpy as np from pathlib import Path bits_per_ln = 96 # coded_bits_per_line (data", "(nof_frames, 24), (int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)] ibp = np.zeros((y_dim, x_dim), dtype='uint8')", "np from pathlib import Path bits_per_ln = 96 # coded_bits_per_line (data payload, 
excluding", "(data payload, excluding calibration bits) nof_data_ln = 2 # number_of_data_lines nof_black_ln_bef = 2", "| framerate | pix_per_ln | ln_per_frame # Each setting is followed by the", "j in range(ystart, ystop): # ystart, ystop, xstart, xstop for i in range(xstart,", "of the process) vact_ref = 135.0 # make float, as target result must", "xcur + i] = 1-(i % 2) # calibration line for j in", "includes 1 calibration line x_dim = nof_black_px_bef + 2 + bits_per_ln + nof_black_px_aft", "nof_black_ln_bef + 1 + nof_data_ln + nof_black_ln_aft # total lines in bit pattern,", "1px = 1bit at this stage of the process) vact_ref = 135.0 #", "ln_per_frame # Each setting is followed by the number of bits used to", "= 5 # number_of_black_pixels_before nof_black_px_aft = 3 # number_of_black_pixels_after y_dim = nof_black_ln_bef +", "in range((bits_per_ln+2)): ibp[ycur, xcur + i] = 1-(i % 2) # calibration line", "24), (int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)] ibp = np.zeros((y_dim, x_dim), dtype='uint8') bitinppat", "j in range(nof_data_ln): # calibration pixels per data line for i in range(2):", "= int(i*hinc) i00[j, i, :] = ibp[ycur, xcur]*255.0 status = cv2.imwrite(str(Path(str(bitpat_file_dir)+'\\\\'+str(frame_number).zfill(5)+'.png')), i00) print(\"|", "in range(nof_data_ln): for i in range(bits_per_ln): ibp[ycur+1+j, xcur+2+i] = bitinppat[j*bits_per_ln + i] xstart", "= np.zeros((ystop, xstop, 3), dtype='uint8') print(str(xstop)+\"x\"+str(ystop), end='', flush=True) for j in range(ystart, ystop):", "of bits used to encode it settings = [(frame_number, 24), (nof_frames, 24), (int(framerate*1000),", "with 2x2 bit pattern \"bit\" size as the lowest resolution def bp_create(bitpat_file_dir, frame_number,", "this stage of the process) vact_ref = 135.0 # make float, as target", "in settings: setting = x[0] for i in range(x[1]): bitinppat[pnt] = setting %", "24), (nof_frames, 24), (int(framerate*1000), 17), (pix_per_ln, 13), (ln_per_frame, 13)] ibp = 
# Generates a bit pattern containing frame number, total frames, frame rate,
# horizontal and vertical resolution
# Step 1: define bit pattern for 240x135 video
# Step 2: upscale as needed towards target resolution
# Note: intended for 480x270 video with 2x2 bit pattern "bit" size as the lowest resolution
def bp_create(bitpat_file_dir, frame_number, nof_frames, framerate, pix_per_ln, ln_per_frame):
    """Render one machine-readable bit-pattern frame and save it as a PNG.

    The pattern serialises, LSB-first: frame_number (24 bits), nof_frames
    (24 bits), int(framerate*1000) (17 bits), pix_per_ln (13 bits) and
    ln_per_frame (13 bits) across two data lines, preceded by one
    alternating calibration line and two calibration pixels per data line.
    The 240x135-reference pattern is then upscaled (nearest neighbour) to
    the requested pix_per_ln x ln_per_frame resolution and written to
    <bitpat_file_dir>/<frame_number zero-padded to 5 digits>.png.

    Args:
        bitpat_file_dir: Directory the PNG is written into.
        frame_number: Frame index; encoded in the pattern and used as the file name.
        nof_frames: Total number of frames; encoded in the pattern.
        framerate: Frame rate in fps; stored as int(framerate*1000) (17 bits).
        pix_per_ln: Target horizontal resolution in pixels; encoded in the pattern.
        ln_per_frame: Target vertical resolution in lines; encoded in the pattern.
    """
    import cv2
    import numpy as np
    from pathlib import Path

    bits_per_ln = 96      # coded_bits_per_line (data payload, excluding calibration bits)
    nof_data_ln = 2       # number_of_data_lines
    nof_black_ln_bef = 2  # number_of_black_lines_before
    nof_black_ln_aft = 1  # number_of_black_lines_after
    nof_black_px_bef = 5  # number_of_black_pixels_before
    nof_black_px_aft = 3  # number_of_black_pixels_after
    # Total lines in bit pattern; includes 1 calibration line.
    y_dim = nof_black_ln_bef + 1 + nof_data_ln + nof_black_ln_aft
    # Total pixels per bit pattern line; includes 2 calibration bits.
    # (note: 1px = 1bit at this stage of the process)
    x_dim = nof_black_px_bef + 2 + bits_per_ln + nof_black_px_aft
    vact_ref = 135.0  # make float, as target result must be float
    hact_ref = 240.0
    vinc = vact_ref / ln_per_frame
    hinc = hact_ref / pix_per_ln

    # Settings consist of: frame_number | nof_frames | framerate | pix_per_ln | ln_per_frame
    # Each setting is followed by the number of bits used to encode it.
    settings = [(frame_number, 24), (nof_frames, 24), (int(framerate * 1000), 17),
                (pix_per_ln, 13), (ln_per_frame, 13)]

    ibp = np.zeros((y_dim, x_dim), dtype='uint8')
    bitinppat = np.zeros((bits_per_ln * nof_data_ln,), dtype='uint8')

    # Serialise all settings into the flat bit buffer, least significant bit first.
    pnt = 0
    for value, nof_bits in settings:
        setting = value
        for _ in range(nof_bits):
            bitinppat[pnt] = setting % 2
            pnt += 1
            setting >>= 1

    ycur = nof_black_ln_bef
    xcur = nof_black_px_bef
    # Calibration line: alternating 1/0 pixels spanning the full coded width.
    for i in range(bits_per_ln + 2):
        ibp[ycur, xcur + i] = 1 - (i % 2)
    # Two calibration pixels at the start of each data line; phase alternates per line.
    for j in range(nof_data_ln):
        for i in range(2):
            ibp[ycur + 1 + j, xcur + i] = (i + j) % 2
    # Payload bits: bits_per_ln per data line, after the two calibration pixels.
    for j in range(nof_data_ln):
        for i in range(bits_per_ln):
            ibp[ycur + 1 + j, xcur + 2 + i] = bitinppat[j * bits_per_ln + i]

    # Nearest-neighbour upscale from the reference grid to the target resolution.
    xstart = 0
    xstop = int(x_dim / hinc)
    ystart = 0
    ystop = int(y_dim / vinc)
    i00 = np.zeros((ystop, xstop, 3), dtype='uint8')
    print(str(xstop) + "x" + str(ystop), end='', flush=True)
    for j in range(ystart, ystop):
        for i in range(xstart, xstop):
            ycur = int(j * vinc)
            xcur = int(i * hinc)
            i00[j, i, :] = ibp[ycur, xcur] * 255.0

    # Fix: build the output path with pathlib joining instead of a hard-coded
    # '\\' separator, so the file lands inside bitpat_file_dir on any OS
    # (the old string concatenation produced a backslash-in-name file on POSIX).
    out_path = Path(str(bitpat_file_dir)) / (str(frame_number).zfill(5) + '.png')
    status = cv2.imwrite(str(out_path), i00)
    print("| " + str(settings) + " | " + {True: 'saved', False: 'failed'}[status])
import functools
import time

import aws_scatter_gather.util.logger as logger


def trace(message, *args):
    """Return a Trace context manager; `message` is str.format-ed with *args."""
    return Trace(message, *args)


def traced(f):
    """Decorator that logs entry, exit and duration of every call to f.

    Fix: functools.wraps preserves f's __name__/__doc__ on the wrapper, so
    introspection and stacked decorators keep working.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with trace("{} args={}, kwargs={}", f.__name__, [*args], {**kwargs}):
            return f(*args, **kwargs)
    return wrapper


class Trace(object):
    """Context manager (sync and async) logging START plus SUCCESS/FAILURE with duration."""

    def __init__(self, message, *args):
        # Format once, up front; the resulting string is reused in all log lines.
        self.message = message.format(*args)

    def __enter__(self):
        self.start = time.time_ns()
        logger.info("START \"%s\"...", str(self.message))
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.end = time.time_ns()
        # Nanoseconds -> milliseconds. NOTE(review): attribute keeps its
        # historical spelling "duration_milis" for backward compatibility.
        self.duration_milis = int((self.end - self.start) / 1000 / 1000)
        if exc_type is None:
            logger.info("SUCCESS of \"%s\". Duration %d millis.",
                        str(self.message), self.duration_milis)
        else:
            # exc_info=True attaches the active exception/traceback to the record.
            logger.info("FAILURE of \"%s\". Duration %d millis.",
                        str(self.message), self.duration_milis, exc_info=True)
        # Implicit None return: exceptions are never suppressed.

    async def __aenter__(self):
        self.__enter__()
        return self

    async def __aexit__(self, exc_type, exc_value, tb):
        self.__exit__(exc_type, exc_value, tb)
[ "jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data =", "FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1,", "JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data", "slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self):", "class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(),", "jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2)", "6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1],", "FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6)", "self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6)", "= JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak", "import JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak", "data = jak[0] 
self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude()", "JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak =", "self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0]", "TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1,", "jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data =", "import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1],", "from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data =", "unittest from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase):", "self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data = jak[0]", "(1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(),", "jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2)", "jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 
6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2)", "= JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak", "data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude()", "JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak =", "from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def", "JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude()", "test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0],", "self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6)", "= jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data", "6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1],", "FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1,", "(1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 
2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(),", "JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak =", "= jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data", "2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0],", "2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN)) self.assertEqual(data.edge_index.size()[0],", "import unittest from slgnn.data_processing.pyg_datasets import JAK1Dude, JAK2Dude, JAK3Dude from slgnn.config import FILTERED_PUBCHEM_FP_LEN class", "slgnn.config import FILTERED_PUBCHEM_FP_LEN class TestDudeDatasets(unittest.TestCase): def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0]", "self.assertEqual(data.edge_index.size()[0], 2) jak = JAK3Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))", "def test_jak1_jak2_jak3(self): jak = JAK1Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))", "self.assertEqual(data.edge_index.size()[0], 2) jak = JAK2Dude() data = jak[0] self.assertEqual(data.x.size()[1], 6) self.assertEqual(data.y.size(), (1, FILTERED_PUBCHEM_FP_LEN))" ]
[ "Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\",", "import models from django.contrib.auth.models import User # Create your models here. class Profile(models.Model):", "django.contrib.auth.models import User # Create your models here. class Profile(models.Model): user = models.OneToOneField(User,", "from django.contrib.auth.models import User # Create your models here. class Profile(models.Model): user =", "Create your models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250,", "User # Create your models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography", "models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True) phone_number", "class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image =", "# Create your models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography =", "models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True) phone_number = models.CharField(max_length=15, blank=True, null=True)", "<gh_stars>1-10 from django.db import models from django.contrib.auth.models import User # Create your models", "from django.db import models from django.contrib.auth.models import User # Create your models here.", "django.db import models from django.contrib.auth.models import User # Create your models here. 
class", "= models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True) phone_number = models.CharField(max_length=15, blank=True,", "your models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True,", "models from django.contrib.auth.models import User # Create your models here. class Profile(models.Model): user", "here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image", "on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True) phone_number =", "models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True)", "user = models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True,", "biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True) phone_number = models.CharField(max_length=15,", "import User # Create your models here. class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE)", "= models.OneToOneField(User, on_delete=models.CASCADE) biography = models.CharField(max_length=250, blank=True, null=True) image = models.ImageField(upload_to=\"users/images\", blank=True, null=True)" ]
[ "is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) ->", "bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model from pydantic import validate_arguments as", "= {} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...)", "presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true", "\"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {} for name, field", "= (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions )", "ignore def set_type_model(func: Callable) -> Callable: \"\"\" try generate request body model from", "...) else: default_value = getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return create_model(typed_dict.__name__,", "<reponame>william-wambua/rpc.py<gh_stars>100-1000 import functools import inspect import typing import warnings __all__ = [ \"BaseModel\",", "specify the type for the parameter {func.__name__}:{name}.\" # ) return func # Maybe", "ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout:", "create_model from pydantic import validate_arguments as pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205", "I'm not sure. if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) 
else: field_definitions[name]", ") type_error.more_info = exception raise type_error return change_exception # type: ignore except ImportError:", "typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {} for name,", "<title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({", "\" `.more_info` of this exception to view detailed information.\" ) type_error.more_info = exception", "field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter in sig.parameters.items(): if parameter.annotation ==", "using type hint \" \"to create OpenAPI docs or convert type, \" \"please", "typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...)", "# type: ignore def set_type_model(func: Callable) -> Callable: \"\"\" try generate request body", "to pass pydantic's type verification, please output\" \" `.more_info` of this exception to", "[ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true })", "getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore", "for the parameter {func.__name__}:{name}.\" # ) return func # Maybe the type hint", "func # Maybe the type hint should be mandatory? I'm not sure. if", "\"If you wanna using type hint \" \"to create OpenAPI docs or convert", "from pypi.\") def validate_arguments(function: Callable) -> Callable: return function BaseModel = type(\"BaseModel\", (),", "field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) 
else: default_value = getattr(typed_dict,", "hint should be mandatory? I'm not sure. if parameter.default == parameter.empty: field_definitions[name] =", "= (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...) annotations[name] = (field, default_value)", "generate `pydantic.BaseModel` \"\"\" annotations = {} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field):", "typing.Dict[str, typing.Any] = {} for name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty:", "# ) return func # Maybe the type hint should be mandatory? I'm", "typing.Any] = {} for name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: #", "Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except", "function BaseModel = type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func: Callable) ->", ") setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = ( \"If you wanna using", "getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel`", "\"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from", "\"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter in", "output\" \" `.more_info` of this exception to view detailed information.\" ) type_error.more_info =", "...) 
else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model(", "], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true }) </script> </body> </html>", "-> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs)", "function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError", "functools import inspect import typing import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\",", ") return func # Maybe the type hint should be mandatory? I'm not", "body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message", ") warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and", "\"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div", "(parameter.annotation, ...) 
else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] =", "rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui", "`pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable: return function BaseModel = type(\"BaseModel\",", "return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link", "(parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func,", "@functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError as exception: type_error", "warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_,", "= \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body>", "of this exception to view detailed information.\" ) type_error.more_info = exception raise type_error", "[ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable)", "`TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {} for name, field in typed_dict.__annotations__.items():", "try: return function(*args, **kwargs) except ValidationError as exception: type_error = TypeError( \"Failed to", "= (field, default_value) return create_model(typed_dict.__name__, **annotations) # 
type: ignore TEMPLATE = \"\"\"<!DOCTYPE html>", "ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments # visit this issue #", "def validate_arguments(function: Callable) -> Callable: return function BaseModel = type(\"BaseModel\", (), {}) #", "verification, please output\" \" `.more_info` of this exception to view detailed information.\" )", "\"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic", "\" \"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_)", "type for the parameter {func.__name__}:{name}.\" # ) return func # Maybe the type", "type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable:", "sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter in sig.parameters.items():", "href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui =", "message = ( \"If you wanna using type hint \" \"to create OpenAPI", "# raise ValueError( # f\"You must specify the type for the parameter {func.__name__}:{name}.\"", "value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter", "url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true,", "default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name,", "import validate_arguments as pydantic_validate_arguments # visit this 
issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable)", "# type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\">", "visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function)", "import typing import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\",", "import functools import inspect import typing import warnings __all__ = [ \"BaseModel\", \"create_model\",", "exception: type_error = TypeError( \"Failed to pass pydantic's type verification, please output\" \"", "`.more_info` of this exception to view detailed information.\" ) type_error.more_info = exception raise", "for name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError( #", "else: default_value = getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations)", "name, ...) 
annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE", "ImportWarning) return func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\",", "\"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel,", "model from type hint and default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str,", "create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = ( \"If", "parameter.annotation == parameter.empty: # raise ValueError( # f\"You must specify the type for", "ValidationError as exception: type_error = TypeError( \"Failed to pass pydantic's type verification, please", "information.\" ) type_error.more_info = exception raise type_error return change_exception # type: ignore except", "view detailed information.\" ) type_error.more_info = exception raise type_error return change_exception # type:", "-> Callable: \"\"\" try generate request body model from type hint and default", "Callable: return function BaseModel = type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func:", "body_model) except NotImplementedError: message = ( \"If you wanna using type hint \"", "pydantic import validate_arguments as pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function:", "**kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable)", "type_error = TypeError( \"Failed to pass pydantic's type verification, please output\" \" `.more_info`", "as pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable:", 
"validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return", "create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function:", "you wanna using type hint \" \"to create OpenAPI docs or convert type,", "'./get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions:", "def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict)", "not sure. if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) else: field_definitions[name] =", "inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter in sig.parameters.items(): if parameter.annotation", "Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url:", "= (parameter.annotation, ...) else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel]", "the type hint should be mandatory? I'm not sure. if parameter.default == parameter.empty:", "and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate", "to view detailed information.\" ) type_error.more_info = exception raise type_error return change_exception #", "is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...) 
annotations[name] =", "def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def", "BaseModel, ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments # visit this issue", "as exception: type_error = TypeError( \"Failed to pass pydantic's type verification, please output\"", "<script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [", "\"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model", "except ImportError: def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic` from", "False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations", "from pydantic import BaseModel, ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments #", "Callable: \"\"\" try generate request body model from type hint and default value", "default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head>", "-> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]:", "ignore except ImportError: def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic`", "sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError( # f\"You must specify the", "def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations =", "issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def 
parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict`", "import inspect import typing import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\",", "pass pydantic's type verification, please output\" \" `.more_info` of this exception to view", "parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) else: field_definitions[name] = (parameter.annotation, parameter.default) if", "typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message =", "and default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for", "type hint and default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] =", "<script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset", "\"\"\" annotations = {} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] =", "field_definitions[name] = (parameter.annotation, ...) else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model:", "name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError( # f\"You", "# https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args,", "type_error.more_info = exception raise type_error return change_exception # type: ignore except ImportError: def", "# Maybe the type hint should be mandatory? I'm not sure. 
if parameter.default", "def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError as exception: type_error =", "= typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model from pydantic import", "field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions", "issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def", "annotations = {} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field),", "pydantic's type verification, please output\" \" `.more_info` of this exception to view detailed", "== parameter.empty: field_definitions[name] = (parameter.annotation, ...) 
else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions:", "'#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions:", "f\"You must specify the type for the parameter {func.__name__}:{name}.\" # ) return func", "SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking:", "Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args,", "( \"If you wanna using type hint \" \"to create OpenAPI docs or", "create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\"", "body model from type hint and default value \"\"\" sig = inspect.signature(func) field_definitions:", "the parameter {func.__name__}:{name}.\" # ) return func # Maybe the type hint should", "<html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script", "ValueError( # f\"You must specify the type for the parameter {func.__name__}:{name}.\" # )", "parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {}", "-> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {} for", "for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value", "be mandatory? I'm not sure. 
if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...)", "from pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool: return issubclass(type_,", "raise type_error return change_exception # type: ignore except ImportError: def create_model(*args, **kwargs): #", "type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI", "pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict)", "**field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = ( \"If you wanna", "wanna using type hint \" \"to create OpenAPI docs or convert type, \"", "please output\" \" `.more_info` of this exception to view detailed information.\" ) type_error.more_info", "if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) 
else: field_definitions[name] = (parameter.annotation, parameter.default)", "type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func: Callable) -> Callable: \"\"\" try", "annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE", "= SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\",", "return change_exception # type: ignore except ImportError: def create_model(*args, **kwargs): # type: ignore", "setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = ( \"If you wanna using type", "(field, default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html>", "warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable", "{} for name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError(", "NotImplementedError: message = ( \"If you wanna using type hint \" \"to create", "html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div>", "this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function)", "request body model from type hint and default value \"\"\" sig = inspect.signature(func)", "= type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func: Callable) -> Callable: \"\"\"", "pypi.\") def validate_arguments(function: Callable) -> Callable: return function BaseModel = type(\"BaseModel\", (), {})", "from pydantic 
import validate_arguments as pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def", "(parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return", "change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError as exception: type_error = TypeError(", "default_value = getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) #", "type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const", "# type: ignore except ImportError: def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need", "Maybe the type hint should be mandatory? I'm not sure. 
if parameter.default ==", "raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable: return function", "\"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to generate `pydantic.BaseModel` \"\"\"", "def set_type_model(func: Callable) -> Callable: \"\"\" try generate request body model from type", "Callable) -> Callable: \"\"\" try generate request body model from type hint and", "TypeError( \"Failed to pass pydantic's type verification, please output\" \" `.more_info` of this", "to generate `pydantic.BaseModel` \"\"\" annotations = {} for name, field in typed_dict.__annotations__.items(): if", "ImportError: def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\")", "name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value =", "{}) # type: ignore def set_type_model(func: Callable) -> Callable: \"\"\" try generate request", "return func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False)", "convert type, \" \"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func", "</head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs',", "parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError( # f\"You must", "try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError:", "should be mandatory? I'm not sure. 
if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation,", "type hint should be mandatory? I'm not sure. if parameter.default == parameter.empty: field_definitions[name]", "`pydantic.BaseModel` \"\"\" annotations = {} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name]", "exception to view detailed information.\" ) type_error.more_info = exception raise type_error return change_exception", "# type: ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable) ->", "if parameter.annotation == parameter.empty: # raise ValueError( # f\"You must specify the type", "must specify the type for the parameter {func.__name__}:{name}.\" # ) return func #", "https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs):", "id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets:", "func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = ( \"If you", "...) 
annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) # type: ignore TEMPLATE =", "-> Callable: return function BaseModel = type(\"BaseModel\", (), {}) # type: ignore def", "(), {}) # type: ignore def set_type_model(func: Callable) -> Callable: \"\"\" try generate", "= TypeError( \"Failed to pass pydantic's type verification, please output\" \" `.more_info` of", "type hint \" \"to create OpenAPI docs or convert type, \" \"please install", "typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model from pydantic import validate_arguments", "create OpenAPI docs or convert type, \" \"please install `pydantic` from pypi.\" )", "detailed information.\" ) type_error.more_info = exception raise type_error return change_exception # type: ignore", "**kwargs): try: return function(*args, **kwargs) except ValidationError as exception: type_error = TypeError( \"Failed", "<body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id:", "] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model from", "hint \" \"to create OpenAPI docs or convert type, \" \"please install `pydantic`", "SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true }) </script> </body>", "SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true }) </script>", "dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: \"BaseLayout\", deepLinking: true, showExtensions: true,", "type_error return change_exception # type: ignore except 
ImportError: def create_model(*args, **kwargs): # type:", "**annotations) # type: ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\"", "return func # Maybe the type hint should be mandatory? I'm not sure.", "import BaseModel, ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments # visit this", "= [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\",", "validate_arguments(function: Callable) -> Callable: return function BaseModel = type(\"BaseModel\", (), {}) # type:", "install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool:", "ignore raise NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable: return", "else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__,", "= ( \"If you wanna using type hint \" \"to create OpenAPI docs", "in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name,", "= getattr(typed_dict, name, ...) annotations[name] = (field, default_value) return create_model(typed_dict.__name__, **annotations) # type:", "src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis,", "{func.__name__}:{name}.\" # ) return func # Maybe the type hint should be mandatory?", "parameter.empty: field_definitions[name] = (parameter.annotation, ...) 
else: field_definitions[name] = (parameter.annotation, parameter.default) if field_definitions: try:", "const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ],", "<head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script>", "Callable) -> Callable: return function BaseModel = type(\"BaseModel\", (), {}) # type: ignore", "NotImplementedError(\"Need install `pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable: return function BaseModel", "pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function", "hint and default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {}", "# visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) -> Callable: function =", "dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse `TypedDict` to", "\"__body_model__\", body_model) except NotImplementedError: message = ( \"If you wanna using type hint", "change_exception # type: ignore except ImportError: def create_model(*args, **kwargs): # type: ignore raise", "parse `TypedDict` to generate `pydantic.BaseModel` \"\"\" annotations = {} for name, field in", "function(*args, **kwargs) except ValidationError as exception: type_error = TypeError( \"Failed to pass pydantic's", "layout: \"BaseLayout\", deepLinking: true, showExtensions: true, showCommonExtensions: true }) </script> </body> </html> \"\"\"", "\"set_type_model\", \"is_typed_dict_type\", 
\"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import", "mandatory? I'm not sure. if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) else:", "= create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except NotImplementedError: message = (", "try generate request body model from type hint and default value \"\"\" sig", "field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model) except", "or convert type, \" \"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return", "\"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) ->", "func def is_typed_dict_type(type_) -> bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def", "== parameter.empty: # raise ValueError( # f\"You must specify the type for the", "type: ignore except ImportError: def create_model(*args, **kwargs): # type: ignore raise NotImplementedError(\"Need install", "type verification, please output\" \" `.more_info` of this exception to view detailed information.\"", "TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head>", "ignore TEMPLATE = \"\"\"<!DOCTYPE html> <html> <head> <link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title>", "= {} for name, parameter in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise", "{} for name, field in typed_dict.__annotations__.items(): if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) 
else:", "import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ]", "Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError, create_model from pydantic", "<div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui',", "parameter.empty: # raise ValueError( # f\"You must specify the type for the parameter", "exception raise type_error return change_exception # type: ignore except ImportError: def create_model(*args, **kwargs):", "# f\"You must specify the type for the parameter {func.__name__}:{name}.\" # ) return", "the type for the parameter {func.__name__}:{name}.\" # ) return func # Maybe the", "docs or convert type, \" \"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning)", "bool: return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\"", "in sig.parameters.items(): if parameter.annotation == parameter.empty: # raise ValueError( # f\"You must specify", "__all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable =", "return function(*args, **kwargs) except ValidationError as exception: type_error = TypeError( \"Failed to pass", "\" \"to create OpenAPI docs or convert type, \" \"please install `pydantic` from", "except ValidationError as exception: type_error = TypeError( \"Failed to pass pydantic's type verification,", "pydantic import BaseModel, ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments # visit", "\"\"\" try generate request body model from type hint and default value 
\"\"\"", "= inspect.signature(func) field_definitions: typing.Dict[str, typing.Any] = {} for name, parameter in sig.parameters.items(): if", "= pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError as", "generate request body model from type hint and default value \"\"\" sig =", "validate_arguments as pydantic_validate_arguments # visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 def validate_arguments(function: Callable) ->", "`pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func def is_typed_dict_type(type_) -> bool: return", "<link type=\"text/css\" rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui.css\"> <title>OpenAPI Docs</title> </head> <body> <div id=\"swagger-ui\"></div> <script src=\"https://cdn.jsdelivr.net/npm/swagger-ui-dist@3.30.0/swagger-ui-bundle.js\"></script> <script>", "= exception raise type_error return change_exception # type: ignore except ImportError: def create_model(*args,", "type, \" \"please install `pydantic` from pypi.\" ) warnings.warn(message, ImportWarning) return func def", "from type hint and default value \"\"\" sig = inspect.signature(func) field_definitions: typing.Dict[str, typing.Any]", "if is_typed_dict_type(field): annotations[name] = (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...) 
annotations[name]", "pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try: return function(*args, **kwargs) except ValidationError as exception:", "inspect import typing import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\",", "this exception to view detailed information.\" ) type_error.more_info = exception raise type_error return", "parameter.default) if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\",", "if field_definitions: try: body_model: typing.Type[BaseModel] = create_model( func.__name__, **field_definitions ) setattr(func, \"__body_model__\", body_model)", "**kwargs) except ValidationError as exception: type_error = TypeError( \"Failed to pass pydantic's type", "return function BaseModel = type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func: Callable)", "raise ValueError( # f\"You must specify the type for the parameter {func.__name__}:{name}.\" #", "parameter {func.__name__}:{name}.\" # ) return func # Maybe the type hint should be", "try: from pydantic import BaseModel, ValidationError, create_model from pydantic import validate_arguments as pydantic_validate_arguments", "set_type_model(func: Callable) -> Callable: \"\"\" try generate request body model from type hint", "\"to create OpenAPI docs or convert type, \" \"please install `pydantic` from pypi.\"", "type: ignore def set_type_model(func: Callable) -> Callable: \"\"\" try generate request body model", "typing import warnings __all__ = [ \"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\",", "\"BaseModel\", \"create_model\", \"validate_arguments\", \"set_type_model\", \"is_typed_dict_type\", \"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", 
bound=typing.Callable) try:", "annotations[name] = (parse_typed_dict(field), ...) else: default_value = getattr(typed_dict, name, ...) annotations[name] = (field,", "return issubclass(type_, dict) and getattr(type_, \"__annotations__\", False) def parse_typed_dict(typed_dict) -> typing.Type[BaseModel]: \"\"\" parse", "except NotImplementedError: message = ( \"If you wanna using type hint \" \"to", "\"Failed to pass pydantic's type verification, please output\" \" `.more_info` of this exception", "def validate_arguments(function: Callable) -> Callable: function = pydantic_validate_arguments(function) @functools.wraps(function) def change_exception(*args, **kwargs): try:", "OpenAPI docs or convert type, \" \"please install `pydantic` from pypi.\" ) warnings.warn(message,", "\"parse_typed_dict\", \"TEMPLATE\", ] Callable = typing.TypeVar(\"Callable\", bound=typing.Callable) try: from pydantic import BaseModel, ValidationError,", "install `pydantic` from pypi.\") def validate_arguments(function: Callable) -> Callable: return function BaseModel =", "sure. if parameter.default == parameter.empty: field_definitions[name] = (parameter.annotation, ...) else: field_definitions[name] = (parameter.annotation,", "BaseModel = type(\"BaseModel\", (), {}) # type: ignore def set_type_model(func: Callable) -> Callable:" ]
[ "from selenium import webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): # def", "from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): # self.browser =", "class FunctionalTestCase(TestCase): # def setUp(self): # self.browser = webdriver.Firefox() # def test(self): #", "test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response", "TestCase from selenium import webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): #", "response from django.test import TestCase from selenium import webdriver from account.forms import RegistrationForm", "= self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test',", "self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source # #", "# self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/') #", "def setUp(self): # self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order", "tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/')", "<filename>fyp/guardian/tests.py from urllib import response from django.test import TestCase from selenium import webdriver", "# self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source #", "in browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class", "# response = self.client.get('/') # self.assertTemplateUsed(response, 
'dashboardpharmacy.html') # def test_home_status_code(self): # response =", "import TestCase from selenium import webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase):", "self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200) #", "urllib import response from django.test import TestCase from selenium import webdriver from account.forms", "import webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): #", "response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response = self.client.get('/')", "assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit()", "# assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self): #", "class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') #", "setUp(self): # self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID',", "# self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200)", "webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): # self.browser", "RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>', 'term':'True','user_type': 'pharmacist'}) # self.assertTrue(form.is_valid())", "# def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def 
test_home_homepage_template(self): # response", "test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source", "import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): # self.browser = webdriver.Firefox() #", "def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in", "webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert", "self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy')", "self.assertEqual(response.status_code, 200) # def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email':", "def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response =", "form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>', 'term':'True','user_type': 'pharmacist'})", "from urllib import response from django.test import TestCase from selenium import webdriver from", "200) # def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>',", "test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>',", "# self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source # # assert", "self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response,", "# class FunctionalTestCase(TestCase): # 
def setUp(self): # self.browser = webdriver.Firefox() # def test(self):", "self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code,", "RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): # self.browser = webdriver.Firefox() # def", "# # assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): #", "def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password':", "FunctionalTestCase(TestCase): # def setUp(self): # self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000')", "# form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>', 'term':'True','user_type':", "'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() #", "# assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def", "# def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password':", "assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self):", "browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase): # def test_home_homepage_template(self): #", "import response from django.test import TestCase from selenium import webdriver from account.forms import", "= self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): # response = 
self.client.get('/') #", "ID', self.browser.page_source) # # assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') #", "# # assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self):", "def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self): #", "test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): # form", "= RegistrationForm(data={'first_name': 'test', 'last_name': 'test', 'email': '<EMAIL>', 'password': '<PASSWORD>','confirm_password': '<PASSWORD>', 'term':'True','user_type': 'pharmacist'}) #", "# class UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html')", "# def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # # assert 'Pharmacy'", "# def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def test_home_status_code(self):", "# response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): # form =", "def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): #", "django.test import TestCase from selenium import webdriver from account.forms import RegistrationForm # class", "response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): # form = RegistrationForm(data={'first_name':", "# self.assertEqual(response.status_code, 200) # def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name': 'test',", "account.forms 
import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self): # self.browser = webdriver.Firefox()", "self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) #", "# def setUp(self): # self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') #", "= webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source) # #", "selenium import webdriver from account.forms import RegistrationForm # class FunctionalTestCase(TestCase): # def setUp(self):", "from django.test import TestCase from selenium import webdriver from account.forms import RegistrationForm #", "# def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self):", "self.client.get('/') # self.assertEqual(response.status_code, 200) # def test_register_form(self): # form = RegistrationForm(data={'first_name': 'test', 'last_name':", "self.browser.page_source) # # assert 'Pharmacy' in browser.page_source # # assert browser.page_source.find('Pharmacy') # def", "'dashboardpharmacy.html') # def test_home_status_code(self): # response = self.client.get('/') # self.assertEqual(response.status_code, 200) # def", "UnitTestCase(TestCase): # def test_home_homepage_template(self): # response = self.client.get('/') # self.assertTemplateUsed(response, 'dashboardpharmacy.html') # def", "# self.browser = webdriver.Firefox() # def test(self): # self.browser.get('http://localhost:8000') # self.assertIn('Order ID', self.browser.page_source)", "browser.page_source # # assert browser.page_source.find('Pharmacy') # def tearDown(self): # self.browser.quit() # class UnitTestCase(TestCase):" ]
[ "pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback #", "= ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] * level, label, p) if", "p = ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] * level, label, p)", "== '__main__': model = get_model(args.model_pb_graph) # file mode if args.input_file != '': process_file(model,", "channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for non_blocking ) #", "stream_callback=callback # uncomment for non_blocking ) # main loop stream.start_stream() while stream.is_active(): main_process(model,", "= int(p*10) + 1 print(emoji[label] * level, label, p) if __name__ == '__main__':", "np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result]", "ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] * level, label, p) if __name__", "args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS", "__name__ == '__main__': model = get_model(args.model_pb_graph) # file mode if args.input_file != '':", "args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode if args.input < 0: print_pyaudio_devices()", "= pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback", "print(emoji[label] * level, label, p) if __name__ == '__main__': model = get_model(args.model_pb_graph) #", "my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1 audio =", "mode FORMAT = pyaudio.paInt16 CHANNELS = 
1 audio = pyaudio.PyAudio() stream = audio.open(", "* level, label, p) if __name__ == '__main__': model = get_model(args.model_pb_graph) # file", "'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] * level,", "file mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list", "my_exit(model) # device list display mode if args.input < 0: print_pyaudio_devices() my_exit(model) #", "# device list display mode if args.input < 0: print_pyaudio_devices() my_exit(model) # normal:", "'\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result]", "device list display mode if args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime", ") # main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream() stream.close() #", "if args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16", "', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label", "'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result] if label", "input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for non_blocking ) # main loop stream.start_stream()", "realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio() stream =", "label = conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level", "format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # 
uncomment for non_blocking )", "mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display", "{'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result =", "['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] *", "'__main__': model = get_model(args.model_pb_graph) # file mode if args.input_file != '': process_file(model, args.input_file,", "* emoji = {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def", "loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream() stream.close() # finish audio.terminate() my_exit(model)", "args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode if", "on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']:", "audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for non_blocking", "process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode if args.input < 0:", "label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10) + 1", "main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream() stream.close() # finish audio.terminate()", "CHANNELS = 1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True,", "', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): 
result = np.argmax(ensembled_pred) label = conf.labels[result] if", "level = int(p*10) + 1 print(emoji[label] * level, label, p) if __name__ ==", "uncomment for non_blocking ) # main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001)", "import * emoji = {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '}", "get_model(args.model_pb_graph) # file mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) #", "input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for non_blocking ) # main loop", "on_predicted_deskwork) my_exit(model) # device list display mode if args.input < 0: print_pyaudio_devices() my_exit(model)", "= audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for", "!= '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode if args.input", "= 1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input,", "# uncomment for non_blocking ) # main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork)", "'': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode if args.input <", "pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate,", "result = np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p", "rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, 
stream_callback=callback # uncomment for non_blocking ) # main", "print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1 audio", "+ 1 print(emoji[label] * level, label, p) if __name__ == '__main__': model =", "int(p*10) + 1 print(emoji[label] * level, label, p) if __name__ == '__main__': model", "# file mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device", "FORMAT = pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT,", "list display mode if args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode", "display mode if args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT", "conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10)", "audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False,", "stream = audio.open( format=FORMAT, channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment", "if __name__ == '__main__': model = get_model(args.model_pb_graph) # file mode if args.input_file !=", "0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1", "'\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred)", "1 print(emoji[label] * level, label, p) if __name__ == '__main__': model = get_model(args.model_pb_graph)", "= conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level =", "1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, 
channels=CHANNELS, rate=conf.sampling_rate, input=True, input_device_index=args.input, frames_per_buffer=conf.rt_chunk_samples,", "p) if __name__ == '__main__': model = get_model(args.model_pb_graph) # file mode if args.input_file", "normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio() stream", "'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10) + 1 print(emoji[label] * level, label,", "'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label =", "= {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred): result", "level, label, p) if __name__ == '__main__': model = get_model(args.model_pb_graph) # file mode", "if label in ['Writing', 'Scissors', 'Computer_keyboard']: p = ensembled_pred[result] level = int(p*10) +", "for non_blocking ) # main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream()", "start=False, stream_callback=callback # uncomment for non_blocking ) # main loop stream.start_stream() while stream.is_active():", "emoji = {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328 '} def on_predicted_deskwork(ensembled_pred):", "label, p) if __name__ == '__main__': model = get_model(args.model_pb_graph) # file mode if", "if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model) # device list display mode", "'\\u2328 '} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result] if label in", "# normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio()", "from realtime_predictor import * emoji = {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard':", "in ['Writing', 'Scissors', 'Computer_keyboard']: p = 
ensembled_pred[result] level = int(p*10) + 1 print(emoji[label]", "# main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream() stream.close() # finish", "= get_model(args.model_pb_graph) # file mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork) my_exit(model)", "= np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing', 'Scissors', 'Computer_keyboard']: p =", "non_blocking ) # main loop stream.start_stream() while stream.is_active(): main_process(model, on_predicted_deskwork) time.sleep(0.001) stream.stop_stream() stream.close()", "'} def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing',", "realtime_predictor import * emoji = {'Writing': '\\U0001F4DD ', 'Scissors': '\\u2701 ', 'Computer_keyboard': '\\u2328", "mode if args.input < 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT =", "def on_predicted_deskwork(ensembled_pred): result = np.argmax(ensembled_pred) label = conf.labels[result] if label in ['Writing', 'Scissors',", "< 0: print_pyaudio_devices() my_exit(model) # normal: realtime mode FORMAT = pyaudio.paInt16 CHANNELS =", "frames_per_buffer=conf.rt_chunk_samples, start=False, stream_callback=callback # uncomment for non_blocking ) # main loop stream.start_stream() while", "model = get_model(args.model_pb_graph) # file mode if args.input_file != '': process_file(model, args.input_file, on_predicted_deskwork)", "= pyaudio.paInt16 CHANNELS = 1 audio = pyaudio.PyAudio() stream = audio.open( format=FORMAT, channels=CHANNELS," ]
[ "Unless required by applicable law or agreed to in writing, software # distributed", "each leaf and spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] == '':", "{} for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans:", "for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del']", "backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for", "in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan',", "segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id,", "leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device connects to", "leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([]))", "and host. port_count = db.get_vm_count(network_id, host_id) if port_count == 1: segments = context.network.network_segments", "network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator =", "Limited Copyright 2003-2015, All rights reserved. 
# # Licensed under the Apache License,", "None: LOG.info(_(\"Begin create vlan network: device %s, \" \"create vlan %s, port trunk", "import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP", "from neutron.plugins.ml2 import driver_api from oslo_log import log as logging from neutron.common import", "device and DHCP port port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute')", "tenant_id): LOG.info(_(\"The port %s of virtual machine %s has \" \"already inserted into", "context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context):", "device %s, \" \"create vlan %s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list)", "Create NETCONF instances for each leaf and spine device.\"\"\" for leaf in self.leaf_topology:", "= db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id)", "successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create", "segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not", "in network %s is deleted. \"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id =", "delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host)", "configuration in physical devices. 
for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list =", "vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type))", "network %s is deleted. \"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id']", "configuration in the leaf device. for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def", "device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client", "network postcommit begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] segments", "is True: LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed", "And remove the vlan configuration in the leaf device. for leaf_ip in leaf_ref_vlans:", "= vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect", "= db.get_host_list(network_id) # Find which leaf device connects to the host_id. 
leaf_need_configure =", "context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id':", "from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client", "vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def", "segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit", "where the port created. :param vlan_id. Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id,", "# with leafs connected to spine. spine_delete_score = {} for spine_ip, topology in", "vlan %s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if", "port.\"), device_owner) return device_id = port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id", "= list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = []", "\"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id", "result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config successful for\" \"", "-*- coding: utf-8 -*- # # H3C Technologies Co., Limited Copyright 2003-2015, All", "same # device specified by ip address. leaf_ref_vlans = {} leaf_ref_host = {}", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "for HP networking hardware. Automation for VLANs configure with HP switches. 
\"\"\" def", "the host number in same network # with leafs connected to spine. spine_delete_score", "LOG.info(_(\"Delete physical port configuration: \" \"All VMs of host %s in network %s", "None: LOG.warn(_(\"No such switch whose IP is %s in \" \"the configuration file.\"),", "leaf_ref_host[leaf_ip] = True # If there is no host connects to leaf in", "vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create", "connects to above leafs. # We need remove this spine's configuration. spine_generator =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "the port created. :param vlan_id. Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id)", "self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration to physical device. self.sync_helper =", "| vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type)", "tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if", "device IP. \"\"\" client = None if self.rpc_clients is not None: client =", "leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not", "port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id,", "database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the count of port", "port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only process virtual", "context.host: LOG.info(_(\"update port postcommit: No changed.\")) return # Migration is happen. 
LOG.info(_(\"Migration is", "network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have %d vms, \"", "id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type': gre", "is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan", "port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s", "logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking hardware. Automation for", "== host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if", "import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from", "pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete network information from", "LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id']", "True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config successful for\"", "in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology:", "is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] #", "rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client =", "are %d 
VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self,", "spine device connects to the leaf device # which is configured above. spine_generator", "this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete", "def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id)", "= cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend =", "process virtual machine and DHCP server's port. port = context.current device_owner = port['device_owner']", "machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner is", "device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in physical devices. for dev_ip", "instance specified by device IP. \"\"\" client = None if self.rpc_clients is not", "which is configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip", "delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network =", ":param network_id. The uuid of network. :param host_id. 
The host where the port", "= topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in", "%s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context):", "db.get_vm_count(network_id, host_id) if port_count == 1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if", "is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success for %s.\"),", "port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when", "\"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context):", "and DHCP port port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\", "of virtual machine %s has \" \"already inserted into the network %s.\"), str(port_id),", "%s has \" \"already inserted into the network %s.\"), str(port_id), str(device_id), str(network_id)) return", "thread.for sync configuration to physical device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap)", "if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when", "not use this file except in compliance with the License. 
# You may", "config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s", "delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\"))", "result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config", "devices. for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client =", "self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration to physical device. self.sync_helper", "network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type': gre | vlan", "connected to spine. spine_delete_score = {} for spine_ip, topology in spine_generator: leaf_ip =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id)", "self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass %s, url schema %s,\" \"timeout", "append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host) else: if len(set([vlan_id])", "def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete", "segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id)", "def delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network", "vms, \" \"ignore this operation.\"), network_id, 
vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def", "agreed to in writing, software # distributed under the License is distributed on", "context): \"\"\" Just insert network information into database. When the port is created,", "leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list))", "devices. :param network_id. The uuid of network. :param host_id. The host where the", "network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id)", "vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host", "vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf device connects to the host_id.", "spine device connects to above leafs. 
# We need remove this spine's configuration.", "0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"),", "= port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id", "for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config:", "rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not", "network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self,", "still have %d vms, \" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id,", "leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find which spine", "from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as", "# It is the counter of host that connects to the same #", "LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port", "and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner)", "None if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None) if client is", "= network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create", "= list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] =", "ports, 
segments): with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id = ports['id']", "after __init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients()", "context): \"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here", "= sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\"", "network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete", "vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed", "== context.host: LOG.info(_(\"update port postcommit: No changed.\")) return # Migration is happen. LOG.info(_(\"Migration", "if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] =", "to in writing, software # distributed under the License is distributed on an", "implied. # See the License for the specific language governing permissions and #", "\"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in physical devices. 
for", "rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete", "delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host) else: if", "Create restful instances foreach leaf and spine device.\"\"\" for leaf in self.leaf_topology: rest_client", "host that connects to the same # device specified by ip address. leaf_ref_vlans", "port %s of virtual machine %s has \" \"already inserted into the network", "network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self,", "0: leaf_ref_host[leaf_ip] = True # If there is no host connects to leaf", "# [{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type': gre | vlan |", "= self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list =", "leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict", "delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config", "(device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner)", "begin.\")) # Only process virtual machine device and DHCP port port = context.current", "!= n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner) return device_id =", "[]) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if 
leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'],", "vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for", "if client is None: LOG.warn(_(\"No such switch whose IP is %s in \"", "%s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id,", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "is not None: LOG.info(_(\"Begin create vlan network: device %s, \" \"create vlan %s,", "coding: utf-8 -*- # # H3C Technologies Co., Limited Copyright 2003-2015, All rights", "# implied. # See the License for the specific language governing permissions and", "IP. \"\"\" client = None if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip,", "from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import", "in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf device connects to", "= topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id]", "\"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only", "vlan_id. 
Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in", "create_port_postcommit(self, context): \"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) #", "self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem", "|= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', [])", "= tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in", "# we will remove the configuration in the spine device. # And remove", "= [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in leaf_generator:", "leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del']", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id,", "leaf device # which is configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology", "device specified by ip address. 
leaf_ref_vlans = {} leaf_ref_host = {} delete_config =", "self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf and", "restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username,", "schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout,", "host_id. leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology", "delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does", "LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self,", "{} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host']", "result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is", "\"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert network", "= cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list", "None self.rpc_clients = 
{} def initialize(self): \"\"\" MechanismDriver will call it after __init__.", "{} delete_config = {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False)", "spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] == '': leaf['oem'] = self.default_oem", "LOG.info(_(\"Delete network begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] if", "leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip in", "for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create vlan", "ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id,", "= context.current network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network", "self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances", "port_count = db.get_vm_count(network_id, host_id) if port_count == 1: segments = context.network.network_segments segment_type =", "device # only if it is the last vm of host in this", "%s in network %s is deleted. 
\"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id", "in \" \"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We", "if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success", "to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def", "device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list", "def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in our physical devices. :param", "leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'],", "We don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert network information", "in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def", "port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The", "database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id']", "device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create vlan network:", "Copyright 2003-2015, All rights reserved. # # Licensed under the Apache License, Version", "device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for", "with leafs connected to spine. 
spine_delete_score = {} for spine_ip, topology in spine_generator:", "to delete vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk", "\"\"\" Create restful instances foreach leaf and spine device.\"\"\" for leaf in self.leaf_topology:", "vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf device", "segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def", "= None self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver will call it after", "delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del']", "as logging from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from", "self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password)", "port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list)", "= config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout =", "in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True", "the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"),", "return # Delete configuration in device # only if it is the last", "driver for HP networking 
hardware. Automation for VLANs configure with HP switches. \"\"\"", "configuration to physical device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock =", "and spine device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'],", "physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id = network['id']", "in database, ignore it\")) return # Delete configuration in device # only if", "%s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type': gre |", "self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client)", "tenant_id = network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with", "host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan)", "not None: client = self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such switch", "port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical devices.\"\"\"", "in device # only if it is the last vm of host in", "to above leafs. # We need remove this spine's configuration. 
spine_generator = tools.topology_generator(self.spine_topology)", "configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual", "LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete", "n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner) return segments = context.network.network_segments", "device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner) return device_id", "network = context.current network_id = network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if", "physical port configuration: \" \"All VMs of host %s in network %s is", "== 1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id", "for spine in self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem nc_client =", "the port is created, we do real operations in our physical device. \"\"\"", "if old_host_id is None or old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\"))", "real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process", "# limitations under the License. 
from oslo.config import cfg from neutron.plugins.ml2 import driver_api", "\"There are %d VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def", "delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list))", "self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self):", "update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self,", "port_count == 1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan':", "for leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for", "leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', [])", "netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG =", "from oslo_log import log as logging from neutron.common import constants as n_const from", "spine %s, user %s, pass %s, url schema %s,\" \"timeout %d, rpc backend", "delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments): with self.sync_lock: 
network_id = ports['network_id']", "ignore it\")) return # Delete configuration in device # only if it is", "def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\"", "self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf and", "The host where the port created. :param vlan_id. Segmentation ID \"\"\" device_config_list =", "database. When the port is created, we do real operations in our physical", "True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] =", "in leaf_generator: leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |=", "same network and host. port_count = db.get_vm_count(network_id, host_id) if port_count == 1: segments", "tools.topology_generator(self.spine_topology) # This dict is used to count the host number in same", "update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process the migration of virtual machine.\"\"\"", "leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem'] ==", "import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import", "append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"),", "leafs. # We need remove this spine's configuration. 
spine_generator = tools.topology_generator(self.spine_topology) # This", "device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in", "LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and port on", "DHCP port port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner", "segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network", "nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC client instance specified by device", "topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host", "See the License for the specific language governing permissions and # limitations under", "\"\"\"Just process the migration of virtual machine.\"\"\" port = context.current device_owner = port['device_owner']", "Co., Limited Copyright 2003-2015, All rights reserved. 
# # Licensed under the Apache", "network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port", "host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore it\")) return #", "self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf", "= context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore", "client = self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such switch whose IP", "LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def", "in self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'],", "pass def create_network_postcommit(self, context): \"\"\" Just insert network information into database. When the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "= restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'],", "\"All VMs of host %s in network %s is deleted. 
\"), host_id, network_id)", "not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf device connects", "self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf and spine device.\"\"\"", "it is the last vm of host in this network vm_count = db.get_vm_count(network_id,", "in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list))", "device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do", "changed.\")) return # Migration is happen. LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id,", "network %s still have %d vms, \" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id,", "self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem", "if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine %s", "or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner) return", "port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is", "neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp", "owner %s when update port.\"), device_owner) return device_id = port['device_id'] port_id = port['id']", "Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, 
host_id, vlan_id) # Execute configuration in physical", "spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create']", "_create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf and spine device.\"\"\" for leaf", "append((topology['leaf_ports'], vlan_list)) # Check does spine need to delete vlan. for spine_ip in", "port owner %s when update port.\"), device_owner) return device_id = port['device_id'] port_id =", "def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\"", "self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC client instance specified by", "as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import", "tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm", "dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if", "_create_rest_clients(self): \"\"\" Create restful instances foreach leaf and spine device.\"\"\" for leaf in", "this spine's configuration. 
spine_generator = tools.topology_generator(self.spine_topology) # This dict is used to count", "last vm of host in this network vm_count = db.get_vm_count(network_id, host_id) if vm_count", "spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\"", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout", "and # limitations under the License. from oslo.config import cfg from neutron.plugins.ml2 import", "import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import", "topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict:", "networking hardware. Automation for VLANs configure with HP switches. \"\"\" def __init__(self, rpc=None):", "if spine['oem'] == '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username,", "call it after __init__. 
\"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend ==", "+= 1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan',", "= [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine", "device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting", "server's port. port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\", "port created. :param vlan_id. Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) #", "cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None", "db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip,", "segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self,", "there is no host connects to leaf in the same network, # we", "vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still", "has \" \"already inserted into the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert", "network # with leafs connected to spine. 
spine_delete_score = {} for spine_ip, topology", "import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as", "def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process the migration of virtual", "in the leaf device. for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and", "port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id,", "= port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id =", "= cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def initialize(self):", "in same network # with leafs connected to spine. spine_delete_score = {} for", "old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None or old_host_id ==", "vm_count == 1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs of host %s", "to leaf in the same network, # we will remove the configuration in", "= device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not None:", "the count of port that created in the same network and host. port_count", "= int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else:", "create_network_postcommit(self, context): \"\"\" Just insert network information into database. When the port is", "a thread.for sync configuration to physical device. 
self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout,", "nc_client) for spine in self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem nc_client", "spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config", "device_id = ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id,", "%s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list, dev_ip)", "self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'],", "care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert network information into database.", "VMs of host %s in network %s is deleted. \"), host_id, network_id) segment_type", "'': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client)", "in the spine device. # And remove the vlan configuration in the leaf", "specified by device IP. \"\"\" client = None if self.rpc_clients is not None:", "[] if host in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0:", "port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None or old_host_id", "segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id)", "def create_network_postcommit(self, context): \"\"\" Just insert network information into database. 
When the port", "same network, # we will remove the configuration in the spine device. #", "= context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock:", "remove the configuration in the spine device. # And remove the vlan configuration", "= netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if", "def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf and spine device.\"\"\" for leaf", "Find which spine device connects to the leaf device # which is configured", "self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration to physical device.", "topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] +=", "delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\", "insert network information into database. When the port is created, we do real", "same network # with leafs connected to spine. 
spine_delete_score = {} for spine_ip,", "neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config", "self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password", "dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed", "leaf_ref_host = {} delete_config = {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([]))", "config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time)", "LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured. \"", "deleted. \"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type ==", "{} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0:", "This dict is used to count the host number in same network #", "segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not", "dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if", "\" \"All VMs of host %s in network %s is deleted. 
\"), host_id,", "pass %s, url schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username,", "if spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete", "__init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf", "delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device connects to above leafs. #", "= self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf and spine", "port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list, dev_ip) else:", "is the counter of host that connects to the same # device specified", "self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in self.spine_topology: rest_client", "\"\"\" Create NETCONF instances for each leaf and spine device.\"\"\" for leaf in", "port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port, segments) LOG.info(_(\"Delete port post-commit end.\"))", "elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass %s,", "neutron.plugins.ml2 import driver_api from oslo_log import log as logging from neutron.common import constants", "create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and port on physical device.\"\"\"", "LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find which spine device", "device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner) return segments", "= delete_config[dev_ip]['port_vlan'] vlan_del_list = 
delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is", "return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real", "LOG.info(_(\"The network %s still have %d vms, \" \"ignore this operation.\"), network_id, vm_count)", "vm in database, ignore it\")) return # Delete configuration in device # only", "\"\"\" MechanismDriver will call it after __init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients()", "end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass", "if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip)", "device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is", "db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None or old_host_id == context.host: LOG.info(_(\"update", "device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration:", "= network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id", "our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual machine device", "= tools.topology_generator(self.spine_topology) # This dict is used to count the host number in", "leaf device. 
for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in", "self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s,", "the same network and host. port_count = db.get_vm_count(network_id, host_id) if port_count == 1:", "device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "!= 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need to delete vlan.", "database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict", "machine %s has \" \"already inserted into the network %s.\"), str(port_id), str(device_id), str(network_id))", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to", "= port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s", "applicable law or agreed to in writing, software # distributed under the License", "= tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) # It", "\"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id = network['id'] tenant_id =", "# Migration is happen. 
LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments)", "= vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"),", "context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in our physical", "%s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for %s\"),", "need remove this spine's configuration. spine_generator = tools.topology_generator(self.spine_topology) # This dict is used", "= segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else:", "host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine %s has \"", "= context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id,", "device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf", "network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id,", "All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the", "%s, pass %s, url schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology,", "ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in physical devices.", "It is the counter of host that connects to the same # device", "writing, software # distributed under the License is distributed on an \"AS IS\"", "vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already", "configuration: \" \"All VMs of host %s in network %s is deleted. \"),", "self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass", "as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2", "value, # 'id': id, 'network_type': gre | vlan | vxlan }] segment_type =", "self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass %s, url", "compliance with the License. 
# You may obtain a copy of the License", "configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan", "db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg", "LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual machine device and DHCP port", "if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True:", "ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id,", "old_host_id is None or old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\")) return", "host_id = context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with", "True: LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to", "virtual machine and DHCP server's port. 
port = context.current device_owner = port['device_owner'] if", "device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when", "= self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self,", "vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] =", "'id': id, 'network_type': gre | vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id", "spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del']", "context): pass def update_port_postcommit(self, context): \"\"\"Just process the migration of virtual machine.\"\"\" port", "supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have %d vms,", "vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list =", "int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical", "port['device_id'] host_id = context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id']", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #", "vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id)", "# This dict is used to count the host number in same network", "leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: 
leaf_ref_vlans[leaf_ip] -=", "if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1", "return device_id = port['device_id'] host_id = context.host port_id = port['id'] tenant_id = port['tenant_id']", "leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip,", "device_owner) return device_id = port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id =", "end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list:", "(the \"License\"); # you may not use this file except in compliance with", "network. :param host_id. The host where the port created. :param vlan_id. Segmentation ID", "spine device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client)", "it\")) return # Delete configuration in device # only if it is the", "into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the count of", "Migration is happen. LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context)", "Just insert network information into database. 
When the port is created, we do", "for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list", "constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common", "self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration", "\"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip", "# Unless required by applicable law or agreed to in writing, software #", "self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such switch whose IP is %s", "by applicable law or agreed to in writing, software # distributed under the", "configured. \" \"There are %d VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port", "# Create a thread.for sync configuration to physical device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology,", "our physical devices. :param network_id. The uuid of network. :param host_id. The host", "str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id,", "operations in our physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current", "will call it after __init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend", "of virtual machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. 
Device", "not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network':", "LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host,", "self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if", "file except in compliance with the License. # You may obtain a copy", "as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG", "spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list =", "if it is the last vm of host in this network vm_count =", "network, # we will remove the configuration in the spine device. # And", "rest_client) for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def", "CONDITIONS OF ANY KIND, either express or # implied. 
# See the License", "= segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id,", "spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip", "device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner", "for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip]", "pass def update_port_postcommit(self, context): \"\"\"Just process the migration of virtual machine.\"\"\" port =", "= {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list =", "port. port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner", "= {} delete_config = {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip,", "[vlan_id] # Check which spine device connects to above leafs. # We need", "port['device_owner'] LOG.info(_(\"Update port begin. 
Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or", "such vm in database, ignore it\")) return # Delete configuration in device #", "dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self,", "leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan',", "== host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\", "in physical devices. for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan']", "port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner !=", "str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the count of port that", "host_list) # It is the counter of host that connects to the same", "self.leaf_topology: if leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema,", "used to count the host number in same network # with leafs connected", "else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and port", "self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): 
vlan_list = db.get_vlanlist_byhost(host_id) if", "if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting", "spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0)", "vm of host in this network vm_count = db.get_vm_count(network_id, host_id) if vm_count ==", "= config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username =", "__init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock", "under the License. from oslo.config import cfg from neutron.plugins.ml2 import driver_api from oslo_log", "device. # And remove the vlan configuration in the leaf device. for leaf_ip", "or # implied. # See the License for the specific language governing permissions", "not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"),", "== 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration :", "segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id,", "port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create", "= port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id,", "else: LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured.", "device. 
for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config:", "\\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config)", "is not None: client = self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such", "vlan_list)) # Check does spine need to delete vlan. for spine_ip in spine_delete_score:", "it after __init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful':", "# Find which spine device connects to the leaf device # which is", "[] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in leaf_generator: leaf_host", "segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id)", "host_list = db.get_host_list(network_id) # Find which leaf device connects to the host_id. leaf_need_configure", "def _create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf and spine device.\"\"\" for", "if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None) if client is None:", "config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc", "delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need to delete vlan. for spine_ip", "device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner is %s.\"), device_owner) if not", "we do real operations in our physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\"))", "leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine in", "by ip address. 
leaf_ref_vlans = {} leaf_ref_host = {} delete_config = {} for", "leafs connected to spine. spine_delete_score = {} for spine_ip, topology in spine_generator: leaf_ip", "tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id,", "return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id)", "if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] =", "begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] segments = context.network_segments", "pass def delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\"))", "network vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical port configuration:", "the same network, # we will remove the configuration in the spine device.", "> 0: leaf_ref_host[leaf_ip] = True # If there is no host connects to", "configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip']", "postcommit: No changed.\")) return # Migration is happen. LOG.info(_(\"Migration is begin.\")) segments =", "= port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id,", "= netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return", "device connects to above leafs. # We need remove this spine's configuration. 
spine_generator", "device # which is configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in", "is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner", "context): \"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert", "if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update", "db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass def", "%s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result", "spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC", "has already configured. \" \"There are %d VMs in network %s.\"), port_count, network_id)", "physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual machine device and", "network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration", "%s is deleted. 
\"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if", "|= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list))", "netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper", "LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner) return device_id = port['device_id'] host_id", "already configured. \" \"There are %d VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create", "str(leaf_need_configure)) # Find which spine device connects to the leaf device # which", "%s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured. \" \"There are %d VMs", "restful instances foreach leaf and spine device.\"\"\" for leaf in self.leaf_topology: rest_client =", "device_ip): \"\"\" Return a RPC client instance specified by device IP. \"\"\" client", "host_id. The host where the port created. :param vlan_id. Segmentation ID \"\"\" device_config_list", "vlan. 
for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip in", "%s when creating port.\"), device_owner) return device_id = port['device_id'] host_id = context.host port_id", "context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id,", "context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan =", "of host in this network vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1:", "spine_generator = tools.topology_generator(self.spine_topology) # This dict is used to count the host number", "leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for", "None: client = self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such switch whose", "device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"),", "= cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password =", "= restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for", "into the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into", "client is None: LOG.warn(_(\"No such switch whose IP is %s in \" 
\"the", "is None or old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\")) return #", "self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id']", "device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port", "port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner is %s.\"),", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.", "if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan", "host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id", "pass def delete_port(self, host_id, ports, segments): with self.sync_lock: network_id = ports['network_id'] device_id =", "a RPC client instance specified by device IP. \"\"\" client = None if", "port_id, network_id, tenant_id) if old_host_id is None or old_host_id == context.host: LOG.info(_(\"update port", "host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See", "leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list", "config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to", "self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No", "Technologies Co., Limited Copyright 2003-2015, All rights reserved. # # Licensed under the", "and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only process", "network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from", "\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner) return", "in self.leaf_topology: if leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'],", "db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which", "context): \"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) #", "host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\", "it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert network information into database. When", "Check does spine need to delete vlan. 
for spine_ip in spine_delete_score: if spine_delete_score[spine_ip]", "port_id, network_id, tenant_id) # Get the count of port that created in the", "= context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id,", "from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id = network['id'] tenant_id =", "network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id)", "vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure))", "port_id = ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id):", "rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "== '': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'],", "LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id':", "the License for the specific language governing permissions and # limitations under the", "for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"), vlan_del_list,", "network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list", "= db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical port configuration: \" \"All", "if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id': id,", "{} def initialize(self): \"\"\" MechanismDriver will call it after __init__. \"\"\" if self.rpc_backend", "= {} for leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if leaf_host in", "int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The", "port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of", "Return a RPC client instance specified by device IP. \"\"\" client = None", "context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner is %s.\"), device_owner) if", "the leaf device. for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip", "= {} def initialize(self): \"\"\" MechanismDriver will call it after __init__. 
\"\"\" if", "host_id) if vm_count == 1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs of", "host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has", "'network_type': gre | vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id']", "%s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have %d vms, \" \"ignore this", "self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username", "MechanismDriver will call it after __init__. \"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif", "{}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict:", "}] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network", "LOG.info(_(\"update port postcommit: No changed.\")) return # Migration is happen. 
LOG.info(_(\"Migration is begin.\"))", "1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id =", "if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan", "set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create']", "leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {})", "the vlan configuration in the leaf device. for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip]", "KIND, either express or # implied. # See the License for the specific", "configuration in our physical devices. :param network_id. The uuid of network. :param host_id.", "limitations under the License. from oslo.config import cfg from neutron.plugins.ml2 import driver_api from", "= logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking hardware. Automation", "return client def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self,", "db.get_host_list(network_id) # Find which leaf device connects to the host_id. leaf_need_configure = []", "self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC client", "self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem'] == '':", "RPC client instance specified by device IP. 
\"\"\" client = None if self.rpc_clients", "rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf and spine device.\"\"\"", "configuration in the spine device. # And remove the vlan configuration in the", "don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just insert network information into", "post-commit begin.\")) # Only process virtual machine device and DHCP port port =", "host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host ==", "n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner) return device_id = port['device_id']", "_get_client(self, device_ip): \"\"\" Return a RPC client instance specified by device IP. \"\"\"", "device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list))", "host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id])", "# Find which leaf device connects to the host_id. leaf_need_configure = [] leaf_generator", "spine device. # And remove the vlan configuration in the leaf device. 
for", "for spine in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self):", "virtual machine device and DHCP port port = context.current device_owner = port['device_owner'] if", "= {} for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in", "Version 2.0 (the \"License\"); # you may not use this file except in", "\"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we", "will remove the configuration in the spine device. # And remove the vlan", "[]) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'],", "Automation for VLANs configure with HP switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology", "device_owner) return device_id = port['device_id'] host_id = context.host port_id = port['id'] tenant_id =", "rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip)", "host_id) if port_count == 1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type", "== 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type", "and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which", "port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get", "context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: 
LOG.info(_(\"Ignore port", "Only process virtual machine device and DHCP port port = context.current device_owner =", "delete_config = {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host", "else: LOG.info(_(\"The network %s still have %d vms, \" \"ignore this operation.\"), network_id,", "vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in", "count of port that created in the same network and host. port_count =", "this network vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical port", "vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check", "%s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
#", "spine in self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'],", "configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config:", "vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find which", "if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return", "topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host ==", "= port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None or", "db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\"))", "success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for %s.\"),", "delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del'])", "machine device and DHCP port port = context.current device_owner = port['device_owner'] if not", "context): pass def delete_port(self, host_id, ports, segments): with self.sync_lock: network_id = ports['network_id'] device_id", "created in the same network and host. 
port_count = db.get_vm_count(network_id, host_id) if port_count", "logging from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common", "context.current network_id = network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id,", "self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan']", "host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id)", "in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip]", "vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if", "self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def", "on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only process virtual machine", "begin. 
Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP):", "append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find", "host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology)", "host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If", "port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process the migration", "%s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id):", "physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client =", "config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username", "if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network", "remove the vlan configuration in the leaf device. 
for leaf_ip in leaf_ref_vlans: if", "create vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create", "in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id])", "delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id,", "Ml2 Mechanism driver for HP networking hardware. Automation for VLANs configure with HP", "None or old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\")) return # Migration", "# device specified by ip address. leaf_ref_vlans = {} leaf_ref_host = {} delete_config", "%s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend)", "_create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in our physical devices. :param network_id.", "postcommit begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] segments =", "if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported", "of host %s in network %s is deleted. \"), host_id, network_id) segment_type =", "%s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan %s for", "LOG.info(_(\"Begin create vlan network: device %s, \" \"create vlan %s, port trunk list", "reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf and spine device.\"\"\" for", "the specific language governing permissions and # limitations under the License. 
from oslo.config", "process virtual machine device and DHCP port port = context.current device_owner = port['device_owner']", "device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def", "for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip,", "'physical_network': value, # 'id': id, 'network_type': gre | vlan | vxlan }] segment_type", "the License. from oslo.config import cfg from neutron.plugins.ml2 import driver_api from oslo_log import", "topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip])", "cfg from neutron.plugins.ml2 import driver_api from oslo_log import log as logging from neutron.common", "network_id = network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id):", "vlan network: device %s, \" \"create vlan %s, port trunk list %s\"), dev_ip,", "device_id = port['device_id'] host_id = context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id", "cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower()", "vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf device connects to the", "context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if", "end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if", "'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass %s, url 
schema %s,\"", "host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host", "real operations in our physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network =", "vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list)", "is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def", "tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure:", "%d VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context):", "delete_port(self, host_id, ports, segments): with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id", "sync configuration to physical device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock", "Check which spine device connects to above leafs. # We need remove this", "port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine %s has \" \"already", "def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit", "to count the host number in same network # with leafs connected to", "leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip]", "the host_id. 
leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip,", "if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If there is", "created. :param vlan_id. Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute", "restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for each", "False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check", "instances foreach leaf and spine device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'],", "either express or # implied. # See the License for the specific language", "device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating port.\"), device_owner) return device_id", "port.\"), device_owner) return device_id = port['device_id'] host_id = context.host port_id = port['id'] tenant_id", "if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need to", "in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |=", "delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in", "network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def", "of host that connects to the same # device specified by ip address.", "%s\"), delete_config) return delete_config def delete_vlan_config(self, 
network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from", "vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) #", "def initialize(self): \"\"\" MechanismDriver will call it after __init__. \"\"\" if self.rpc_backend ==", "if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\", "network with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id': id,", "device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context):", "port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self,", "if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find which leaf", "or agreed to in writing, software # distributed under the License is distributed", "db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs", "virtual machine %s has \" \"already inserted into the network %s.\"), str(port_id), str(device_id),", "host_id, vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list", "oslo_log import log as logging from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common", "database, ignore it\")) return # Delete configuration in device # only if it", "tenant_id) if old_host_id is None or old_host_id == context.host: LOG.info(_(\"update port postcommit: No", "tools from neutron.plugins.ml2.drivers.hp.common import config from 
neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf", "leaf device connects to the host_id. leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "of network. :param host_id. The host where the port created. :param vlan_id. Segmentation", "When the port is created, we do real operations in our physical device.", "is happen. LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration", "number in same network # with leafs connected to spine. spine_delete_score = {}", "leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'],", "= self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not None:", "None) if client is None: LOG.warn(_(\"No such switch whose IP is %s in", "[{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type': gre | vlan | vxlan", "License. # You may obtain a copy of the License at # #", "No changed.\")) return # Migration is happen. LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments", "neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg", "the configuration in the spine device. # And remove the vlan configuration in", "vlan configuration in the leaf device. 
for leaf_ip in leaf_ref_vlans: if leaf_ref_host[leaf_ip] is", "owner %s when creating port.\"), device_owner) return device_id = port['device_id'] host_id = context.host", "process the migration of virtual machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update", "{}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list:", "for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments):", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for VLANs configure with HP switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology =", "LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context):", "\" \"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We don't", "= port['device_owner'] LOG.info(_(\"Update port begin. 
Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute')", "not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config", "network_id, host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in", "port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id,", "when creating port.\"), device_owner) return device_id = port['device_id'] host_id = context.host port_id =", "neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class", "= [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id, host_id,", "%s in \" \"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\"", "%s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the", "if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore", "= None if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None) if client", "\" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\"))", "that created in the same network and host. port_count = db.get_vm_count(network_id, host_id) if", "to the host_id. 
leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for", "set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If there is no host connects", "if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\", "License, Version 2.0 (the \"License\"); # you may not use this file except", "port begin. Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner ==", "from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import", "host connects to leaf in the same network, # we will remove the", "migration of virtual machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin.", "remove this spine's configuration. spine_generator = tools.topology_generator(self.spine_topology) # This dict is used to", "for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip)", "self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances for each leaf and spine", "network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None", "device connects to the leaf device # which is configured above. 
spine_generator =", "| vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id,", "spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def", "connects to the same # device specified by ip address. leaf_ref_vlans = {}", "port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore it\")) return # Delete", "leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in", "in our physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id", "host. port_count = db.get_vm_count(network_id, host_id) if port_count == 1: segments = context.network.network_segments segment_type", "network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore it\")) return # Delete configuration", "LOG.info(_(\"The port %s of virtual machine %s has \" \"already inserted into the", "LOG.warn(_(\"No such switch whose IP is %s in \" \"the configuration file.\"), str(device_ip))", "segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id'])", "= ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No", "in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip", "\" \"There are %d VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\"))", "hardware. Automation for VLANs configure with HP switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config()", "to delete vlan. 
for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and", "self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch", "1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs of host %s in network", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "need to delete vlan. for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\", "def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id", "device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self,", "self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach leaf", "real configuration in our physical devices. :param network_id. The uuid of network. 
:param", "host_id, ports, segments): with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id =", "no host connects to leaf in the same network, # we will remove", "spine_delete_score = {} for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip", "is True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip]", "if rpc_client is not None: LOG.info(_(\"Begin create vlan network: device %s, \" \"create", "to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and port on physical", "for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], rest_client) for spine", "if result is True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End", "network begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id,", "port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result", "device_id = port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id", "# only if it is the last vm of host in this network", "vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"),", "LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass", "to the leaf device # which is configured above. 
spine_generator = tools.topology_generator(self.spine_topology) for", "\" \"create vlan %s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result =", "rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a", "user %s, pass %s, url schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology,", "'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s,", "%s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports,", "configuration. spine_generator = tools.topology_generator(self.spine_topology) # This dict is used to count the host", "count the host number in same network # with leafs connected to spine.", "= segments[0]['network_type'] if segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else:", "vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device", "delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True:", "= True # If there is no host connects to leaf in the", "delete vlan. for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip", "the migration of virtual machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port", "by device IP. 
\"\"\" client = None if self.rpc_clients is not None: client", "leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine", "sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create", "in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list", "None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s", "OR CONDITIONS OF ANY KIND, either express or # implied. # See the", "delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete", "= db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id) # Find", "port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id)", "False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |=", "-= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device connects to above", "neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from neutron.plugins.ml2.drivers.hp.rpc import restful", "host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) # It is the", "%d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create", 
"db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our", "configuration in device # only if it is the last vm of host", "leaf and spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] == '': leaf['oem']", "= self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine", "LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else:", "\"\"\" Just insert network information into database. When the port is created, we", "network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network", "port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list =", "begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self,", "the spine device. # And remove the vlan configuration in the leaf device.", "log as logging from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import tools", "have %d vms, \" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id,", "\"\"\"Do real configuration in our physical devices. :param network_id. The uuid of network.", "in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', [])", "use this file except in compliance with the License. 
# You may obtain", "= cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients =", "{} for leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if leaf_host in host_list:", "if leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username,", "# If there is no host connects to leaf in the same network,", "# Get the count of port that created in the same network and", "self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem'] == '': spine['oem']", "str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass def", "collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator", "== 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type", "self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create vlan network: device %s, \"", "network and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only", "restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\"", "network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network and", "True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan network\"))", "def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def", "leaf in self.leaf_topology: if leaf['oem'] == 
'': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'],", "host in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] =", "return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\"", "vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def create_port_postcommit(self, context): \"\"\"Create network", "from oslo.config import cfg from neutron.plugins.ml2 import driver_api from oslo_log import log as", "# Here we only process virtual machine and DHCP server's port. port =", "db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) # It is the counter of", "neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for", "network_id, tenant_id) if old_host_id is None or old_host_id == context.host: LOG.info(_(\"update port postcommit:", ": %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration", "spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict)", "device connects to the host_id. leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref =", "The uuid of network. :param host_id. The host where the port created. 
:param", "leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password)", "owner %s when deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port, segments)", "= context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP:", "self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine", "spine's configuration. spine_generator = tools.topology_generator(self.spine_topology) # This dict is used to count the", "set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del']", "of port that created in the same network and host. port_count = db.get_vm_count(network_id,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "host_id, vlan_id) # Execute configuration in physical devices. for dev_ip in device_config_list: vlan_list", "<reponame>web8search/neutron-Neutron-<gh_stars>1-10 # -*- coding: utf-8 -*- # # H3C Technologies Co., Limited Copyright", "If there is no host connects to leaf in the same network, #", "type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have %d vms, \" \"ignore", "neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc", "connects to the host_id. 
leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {}", "context.current network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "%s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip)", "vlan_id) # Execute configuration in physical devices. for dev_ip in device_config_list: vlan_list =", "self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver will call it after __init__. \"\"\"", "trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id,", "import log as logging from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.hp.common import", "= self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in physical devices. for dev_ip in", "vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host) else: if len(set([vlan_id]) &", "is None: LOG.warn(_(\"No such switch whose IP is %s in \" \"the configuration", "LOG.info(_(\"No such vm in database, ignore it\")) return # Delete configuration in device", "Delete network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id =", "leaf in the same network, # we will remove the configuration in the", "oslo.config import cfg from neutron.plugins.ml2 import driver_api from oslo_log import log as logging", "with the License. 
# You may obtain a copy of the License at", "= db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is None or old_host_id == context.host:", "set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device connects to above leafs.", "instances for each leaf and spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem']", "connects to leaf in the same network, # we will remove the configuration", "begin.\")) network = context.current network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id):", "tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id,", "for each leaf and spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] ==", "%s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s", "= int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type) else:", "physical device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only process virtual machine and", "leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) #", "port configuration: \" \"All VMs of host %s in network %s is deleted.", "law or agreed to in writing, software # distributed under the License is", "trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True:", "segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id = int(segment_id)", "self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The 
network", "not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"),", "network postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self,", "[]) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host) else:", "switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap", "n_const from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db", "into database. When the port is created, we do real operations in our", "ANY KIND, either express or # implied. # See the License for the", "== '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'],", "spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config:", "None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower()", "LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self,", "network_id, tenant_id) # Get the count of port that created in the same", "collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not", "port = context.current device_owner = 
port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP:", "in compliance with the License. # You may obtain a copy of the", "spine need to delete vlan. for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] == 0", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= {} leaf_ref_host = {} delete_config = {} for leaf_ip, topology in leaf_generator:", "vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list", "ip address. leaf_ref_vlans = {} leaf_ref_host = {} delete_config = {} for leaf_ip,", "client = None if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None) if", "# -*- coding: utf-8 -*- # # H3C Technologies Co., Limited Copyright 2003-2015,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "network_id, host_id, vlan_id): \"\"\"Do real configuration in our physical devices. :param network_id. The", "# Execute configuration in physical devices. 
for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create']", "restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism", "vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"), segment_type)", "len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need to delete", "host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'],", "%s, \" \"create vlan %s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result", "driver_api from oslo_log import log as logging from neutron.common import constants as n_const", "specific language governing permissions and # limitations under the License. from oslo.config import", "# # H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved. # #", "LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking hardware.", "in this network vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical", "leaf_ip_ref = {} for leaf_ip, topology in leaf_generator: leaf_host = topology['host'] if leaf_host", "\"create vlan %s, port trunk list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list)", ":param host_id. The host where the port created. :param vlan_id. Segmentation ID \"\"\"", "for the specific language governing permissions and # limitations under the License. 
from", "import driver_api from oslo_log import log as logging from neutron.common import constants as", "express or # implied. # See the License for the specific language governing", "\"\"\" client = None if self.rpc_clients is not None: client = self.rpc_clients.get(device_ip, None)", "%s, user %s, pass %s, url schema %s,\" \"timeout %d, rpc backend %s\"),", "network information into database. When the port is created, we do real operations", "configs with leaf %s.\"), str(leaf_need_configure)) # Find which spine device connects to the", "for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan", "set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip] |= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {})", "file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass", "to physical device. 
self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock()", "creating port.\"), device_owner) return device_id = port['device_id'] host_id = context.host port_id = port['id']", "delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device connects", "network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete", "1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', [])", "vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list =", "network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id,", "with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, # 'id': id, 'network_type':", "is configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip =", "= topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip]", "int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower()", "OF ANY KIND, either express or # implied. # See the License for", "HP networking hardware. Automation for VLANs configure with HP switches. 
\"\"\" def __init__(self,", "nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\"", "def create_port_postcommit(self, context): \"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create port begin.\"))", "when update port.\"), device_owner) return device_id = port['device_id'] port_id = port['id'] tenant_id =", "rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create", "connects to the leaf device # which is configured above. spine_generator = tools.topology_generator(self.spine_topology)", "LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process the", "gre | vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id,", "network = context.current network_id = network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete", "= self.rpc_clients.get(device_ip, None) if client is None: LOG.warn(_(\"No such switch whose IP is", "port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments): with self.sync_lock:", "the same # device specified by ip address. 
leaf_ref_vlans = {} leaf_ref_host =", "= rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"),", "self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF", "= port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s", "for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list,", "else: LOG.info(_(\"Physical switch has already configured. \" \"There are %d VMs in network", "list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# Check does spine need to delete vlan. for spine_ip in spine_delete_score: if", "permissions and # limitations under the License. from oslo.config import cfg from neutron.plugins.ml2", "else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have", "update port.\"), device_owner) return device_id = port['device_id'] port_id = port['id'] tenant_id = port['tenant_id']", "LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\"))", "configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\"", "port owner %s when creating port.\"), device_owner) return device_id = port['device_id'] host_id =", "do real operations in our physical device. 
\"\"\" LOG.info(_(\"Create network postcommit begin.\")) network", "leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in leaf_generator: leaf_host =", "\" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self,", "spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id]", "self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful", "vlan_id): \"\"\"Do real configuration in our physical devices. :param network_id. The uuid of", "device.\"\"\" LOG.info(_(\"Create port begin.\")) # Here we only process virtual machine and DHCP", "in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def", "%s.\"), str(leaf_need_configure)) # Find which spine device connects to the leaf device #", "client def create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self, context):", "ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database,", "def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap", "to spine. spine_delete_score = {} for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip']", "physical device. 
self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients, self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start()", "vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create", "switch has already configured. \" \"There are %d VMs in network %s.\"), port_count,", "this file except in compliance with the License. # You may obtain a", "leaf %s.\"), str(leaf_need_configure)) # Find which spine device connects to the leaf device", "= context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id) #", "class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking hardware. Automation for VLANs", "n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner) return device_id = port['device_id']", "and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner) return", "str(segment_type)) else: LOG.info(_(\"The network %s still have %d vms, \" \"ignore this operation.\"),", "{} leaf_ref_host = {} delete_config = {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip,", "port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just", "context): \"\"\"Just process the migration of virtual machine.\"\"\" port = context.current device_owner =", "LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s still have %d", "inserted into the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information", "host == host_id: delete_config.setdefault(leaf_ip, {}) 
delete_config[leaf_ip].setdefault('port_vlan', []) delete_config[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) delete_config[leaf_ip]['vlan_del'] = []", "does spine need to delete vlan. for spine_ip in spine_delete_score: if spine_delete_score[spine_ip] ==", "vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if", "spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration", "%s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync", "\"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap =", "'': leaf['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client)", "list %s\"), host_list) # It is the counter of host that connects to", "configure with HP switches. 
\"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology", "segment_type == 'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network", "self.sync_timeout, self.sync_overlap) self.sync_lock = self.sync_helper.get_lock() self.sync_helper.start() def _create_rest_clients(self): \"\"\" Create restful instances foreach", "dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments): with self.sync_lock: network_id", "host_id, vlan_id): device_config_dict = {} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list:", "from neutron.plugins.ml2.drivers.hp.common import tools from neutron.plugins.ml2.drivers.hp.common import config from neutron.plugins.ml2.drivers.hp.common import db from", "the leaf device # which is configured above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip,", "== 1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs of host %s in", "cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {}", "host where the port created. :param vlan_id. Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id,", "self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC client instance specified", "host_id, vlan_id): \"\"\"Do real configuration in our physical devices. :param network_id. 
The uuid", "vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id,", "db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine %s has", "such switch whose IP is %s in \" \"the configuration file.\"), str(device_ip)) return", "= {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) !=", "[] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need", "rpc_client = self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create vlan network: device", "db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value,", "# which is configured above. 
spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator:", "import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver):", "# Only process virtual machine device and DHCP port port = context.current device_owner", "%s, spine %s, user %s, pass %s, url schema %s,\" \"timeout %d, rpc", "topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if", "= None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema =", "self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration to physical", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "{} vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id not in vlan_list: vlan_list.append(vlan_id) host_list = db.get_host_list(network_id)", "\"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context): \"\"\" We don't care", "# We need remove this spine's configuration. 
spine_generator = tools.topology_generator(self.spine_topology) # This dict", "create_network_precommit(self, context): \"\"\" We don't care it.\"\"\" pass def create_network_postcommit(self, context): \"\"\" Just", "id, 'physical_network': value, # 'id': id, 'network_type': gre | vlan | vxlan }]", "devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual machine device and DHCP", "= rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True:", "tenant_id = port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if", "operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real", "segment_type) else: LOG.info(_(\"Physical switch has already configured. \" \"There are %d VMs in", "LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id", "pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in our physical devices.", "spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is", "not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore it\"))", "required by applicable law or agreed to in writing, software # distributed under", "language governing permissions and # limitations under the License. from oslo.config import cfg", "def delete_port(self, host_id, ports, segments): with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id']", "LOG.info(_(\"Physical switch has already configured. 
\" \"There are %d VMs in network %s.\"),", "NETCONF instances for each leaf and spine device.\"\"\" for leaf in self.leaf_topology: if", "== n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner) return device_id =", "url schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema,", "spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'],", "port postcommit: No changed.\")) return # Migration is happen. LOG.info(_(\"Migration is begin.\")) segments", "physical devices. for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client", "if port_count == 1: segments = context.network.network_segments segment_type = segments[0]['network_type'] if segment_type ==", "\"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan':", "rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create NETCONF instances", "id, 'network_type': gre | vlan | vxlan }] segment_type = segments[0]['network_type'] segment_id =", "= ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in", "DHCP server's port. 
port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and", "delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is", "from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver", "information into database. When the port is created, we do real operations in", "uuid of network. :param host_id. The host where the port created. :param vlan_id.", "counter of host that connects to the same # device specified by ip", "HP switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology", "\"\"\" Ml2 Mechanism driver for HP networking hardware. Automation for VLANs configure with", "# Delete configuration in device # only if it is the last vm", "= context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner is %s.\"), device_owner)", "from our physical devices.\"\"\" LOG.info(_(\"Delete port post-commit begin.\")) # Only process virtual machine", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "sync_helper LOG = logging.getLogger(__name__) class HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking", "= delete_config[dev_ip]['vlan_del'] if rpc_client is not None: if rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list)", ":param vlan_id. 
Segmentation ID \"\"\" device_config_list = self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration", "postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context):", "db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such vm in database, ignore it\")) return", "= ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id,", "switch whose IP is %s in \" \"the configuration file.\"), str(device_ip)) return client", "\"\"\" if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s,", "vlan host list %s\"), host_list) # It is the counter of host that", "= db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id:", "= network['id'] tenant_id = network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"),", "Find which leaf device connects to the host_id. 
leaf_need_configure = [] leaf_generator =", "device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) #", "set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting", "not in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list))", "leaf_ref_vlans = {} leaf_ref_host = {} delete_config = {} for leaf_ip, topology in", "else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If there", "self.spine_topology: if spine['oem'] == '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema,", "port post-commit begin.\")) # Only process virtual machine device and DHCP port port", "utf-8 -*- # # H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved.", "= [vlan_id] # Check which spine device connects to above leafs. 
# We", "in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if", "if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id:", "spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip", "the counter of host that connects to the same # device specified by", "host %s in network %s is deleted. \"), host_id, network_id) segment_type = segments[0]['network_type']", "network and host. port_count = db.get_vm_count(network_id, host_id) if port_count == 1: segments =", "delete_config[leaf_ip]['vlan_del'] = [] if host in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan))", "def _get_client(self, device_ip): \"\"\" Return a RPC client instance specified by device IP.", "network_id. The uuid of network. :param host_id. The host where the port created.", "# you may not use this file except in compliance with the License.", "is deleted. \"), host_id, network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type", "our physical device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id =", "device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration", "Here we only process virtual machine and DHCP server's port. 
port = context.current", "port['id'] tenant_id = port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id,", "devices.\"\"\" delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip)", "self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a RPC client instance", "and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return", "end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process the migration of", "network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id))", "self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip):", "delete vlan %s for %s.\"), vlan_del_list, dev_ip) else: LOG.warn(_(\"Failed to port trunk %s", "= self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create vlan network: device %s,", "2003-2015, All rights reserved. # # Licensed under the Apache License, Version 2.0", "= ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if not", "cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver will call", "above leafs. # We need remove this spine's configuration. 
spine_generator = tools.topology_generator(self.spine_topology) #", "'vlan': vlan_id = int(segments[0]['segmentation_id']) self._create_vlan_network(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s\"),", "HPDriver(driver_api.MechanismDriver): \"\"\" Ml2 Mechanism driver for HP networking hardware. Automation for VLANs configure", "return # Migration is happen. LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port,", "in self.spine_topology: rest_client = restful_cfg.RestfulCfg(spine['ip'], self.username, self.password) self.rpc_clients.setdefault(spine['ip'], rest_client) def _create_nc_clients(self): \"\"\" Create", "netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def _get_client(self, device_ip): \"\"\" Return a", "vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete physical port configuration: \"", "governing permissions and # limitations under the License. from oslo.config import cfg from", "self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper", "leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if", "tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) # It is", "Create a thread.for sync configuration to physical device. self.sync_helper = sync_helper.SyncHelper(self.leaf_topology, self.spine_topology, self.rpc_clients,", "port that created in the same network and host. 
port_count = db.get_vm_count(network_id, host_id)", "which spine device connects to the leaf device # which is configured above.", "= segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\"))", "collecting spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find which spine device connects", "update_port_postcommit(self, context): \"\"\"Just process the migration of virtual machine.\"\"\" port = context.current device_owner", "in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client", "network: device %s, \" \"create vlan %s, port trunk list %s\"), dev_ip, vlan_list,", "information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the count", "self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend", "self.sync_helper = None self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver will call it", "True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list,", "network_id, tenant_id): LOG.info(_(\"The port %s of virtual machine %s has \" \"already inserted", "segments): with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id", "append((topology['leaf_ports'], spine_vlan_list)) LOG.info(_(\"Collect device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass", "License for the specific language governing permissions and # limitations under the License.", "== 'netconf': self._create_nc_clients() elif 
self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user", "if self.rpc_backend == 'netconf': self._create_nc_clients() elif self.rpc_backend == 'restful': self._create_rest_clients() LOG.info(_(\"leaf %s, spine", "VMs in network %s.\"), port_count, network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass", "def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical devices.\"\"\" delete_config =", "append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if len(delete_config[leaf_ip]['vlan_del']) != 0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) #", "License. from oslo.config import cfg from neutron.plugins.ml2 import driver_api from oslo_log import log", "in spine_generator: leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if", "\"License\"); # you may not use this file except in compliance with the", "= segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context):", "topology in leaf_generator: leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip]", "%s when update port.\"), device_owner) return device_id = port['device_id'] port_id = port['id'] tenant_id", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "%s when deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port, segments) LOG.info(_(\"Delete", "leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in", "initialize(self): \"\"\" MechanismDriver will call it after __init__. 
\"\"\" if self.rpc_backend == 'netconf':", "if segment_type == 'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported", "%s still have %d vms, \" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id,", "spine's configs with leaf %s.\"), str(leaf_need_configure)) # Find which spine device connects to", "self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem = cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients", "physical devices. :param network_id. The uuid of network. :param host_id. The host where", "owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore port", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "LOG.info(_(\"Create port begin.\")) # Here we only process virtual machine and DHCP server's", "!= n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner) return segments =", "= device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not None: LOG.info(_(\"Begin create vlan", "IP is %s in \" \"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self,", "Mechanism driver for HP networking hardware. 
Automation for VLANs configure with HP switches.", "%s.\"), dev_ip) LOG.info(_(\"End create vlan network\")) else: LOG.warn(_(\"Failed to create vlan network\")) def", "if vm_count == 1: LOG.info(_(\"Delete physical port configuration: \" \"All VMs of host", "in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client", "which spine device connects to above leafs. # We need remove this spine's", "%s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments): with", "machine and DHCP server's port. port = context.current device_owner = port['device_owner'] if not", "list %s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result", "in the same network, # we will remove the configuration in the spine", "update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete network", "str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id,", "cfg.CONF.ml2_hp.oem.lower() self.rpc_backend = cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def initialize(self): \"\"\"", "from neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from neutron.plugins.ml2.drivers.hp import sync_helper LOG = logging.getLogger(__name__)", "[vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id):", "port owner %s when deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port,", "-*- # # H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved. 
#", "host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {}) delete_config[leaf_ip].setdefault('port_vlan',", "is the last vm of host in this network vm_count = db.get_vm_count(network_id, host_id)", "# Check which spine device connects to above leafs. # We need remove", "= int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema = cfg.CONF.ml2_hp.schema.lower() self.default_oem =", "network_id) LOG.info(_(\"Create port end.\")) def update_port_precommit(self, context): pass def update_port_postcommit(self, context): \"\"\"Just process", "which leaf device connects to the host_id. leaf_need_configure = [] leaf_generator = tools.topology_generator(self.leaf_topology)", "rpc_client.port_trunk_bulk(port_vlan_tuple_list) is True: if rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success for", "tenant_id): LOG.info(_(\"No such vm in database, ignore it\")) return # Delete configuration in", "str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id,", "else: LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self, context):", "rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock =", "if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list =", "LOG.info(_(\"Update port begin. 
Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner", "delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list = delete_config[dev_ip]['port_vlan'] vlan_del_list = delete_config[dev_ip]['vlan_del'] if rpc_client is", "port is created, we do real operations in our physical device. \"\"\" LOG.info(_(\"Create", "context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id):", "= context.current network_id = network['id'] tenant_id = network['tenant_id'] segments = context.network_segments if not", "= topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if", "len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If there is no", "above. spine_generator = tools.topology_generator(self.spine_topology) for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if", "segments[0]['segmentation_id'] db.create_network(tenant_id, network_id, segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass", "leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if", "2.0 (the \"License\"); # you may not use this file except in compliance", "pass def update_network_postcommit(self, context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\"", "= db.get_host_list(network_id) LOG.info(_(\"Delete vlan host list %s\"), host_list) # It is the counter", "is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list) if result is True: LOG.info(_(\"Create vlan config successful", "from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from 
neutron.plugins.ml2.drivers.hp.rpc import restful as restful_cfg from", "db.get_vlanlist_byhost(host_id) if vlan_id in vlan_list: vlan_list.remove(vlan_id) leaf_generator = tools.topology_generator(self.leaf_topology) host_list = db.get_host_list(network_id) LOG.info(_(\"Delete", "cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password", "%s of virtual machine %s has \" \"already inserted into the network %s.\"),", "delete_config) return delete_config def delete_vlan_config(self, network_id, host_id, vlan_id): \"\"\"Delete vlan configuration from physical", "for leaf in self.leaf_topology: if leaf['oem'] == '': leaf['oem'] = self.default_oem nc_client =", "if host in host_list: leaf_ref_vlans[leaf_ip] |= set(host_vlan) if host == host_id: delete_config.setdefault(leaf_ip, {})", "LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {} vlan_list =", "network\")) def create_port_postcommit(self, context): \"\"\"Create network and port on physical device.\"\"\" LOG.info(_(\"Create port", "= db.get_vm_count(network_id, host_id) if port_count == 1: segments = context.network.network_segments segment_type = segments[0]['network_type']", "host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"), str(segment_type)) else: LOG.info(_(\"The network %s", "# # Unless required by applicable law or agreed to in writing, software", "network['tenant_id'] segments = context.network_segments if not db.is_network_created(tenant_id, network_id): LOG.info(_(\"Create network with id %s.\"),", "import config from neutron.plugins.ml2.drivers.hp.common import db from neutron.plugins.ml2.drivers.hp.rpc import netconf as netconf_cfg from", "port port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and\\ device_owner !=", "== 'restful': 
self._create_rest_clients() LOG.info(_(\"leaf %s, spine %s, user %s, pass %s, url schema", "self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) # Create a thread.for sync configuration to", "%d vms, \" \"ignore this operation.\"), network_id, vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id)", "client instance specified by device IP. \"\"\" client = None if self.rpc_clients is", "leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip])", "dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result = rpc_client.port_trunk_bulk(port_vlan_tuple_list)", "host_id, port_id, network_id, tenant_id) # Get the count of port that created in", "device configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id,", "%s\"), host_list) # It is the counter of host that connects to the", "delete_config = self.collect_delete_config(network_id, host_id, vlan_id) for dev_ip in delete_config: rpc_client = self._get_client(dev_ip) port_vlan_tuple_list", "network_id) segment_type = segments[0]['network_type'] segment_id = segments[0]['segmentation_id'] if segment_type == 'vlan': vlan_id =", "begin.\")) # Here we only process virtual machine and DHCP server's port. 
port", "def delete_port_precommit(self, context): pass def delete_port(self, host_id, ports, segments): with self.sync_lock: network_id =", "the last vm of host in this network vm_count = db.get_vm_count(network_id, host_id) if", "leaf_generator: leaf_host = topology['host'] if leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host))", "'vlan': vlan_id = int(segment_id) self.delete_vlan_config(network_id, host_id, vlan_id) else: LOG.info(_(\"Not supported network type %s.\"),", "host list %s\"), host_list) # It is the counter of host that connects", "segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context): pass", "type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured. \" \"There are %d", "db.create_vm(device_id, host_id, port_id, network_id, tenant_id) # Get the count of port that created", "the License. # You may obtain a copy of the License at #", "or old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\")) return # Migration is", "supported network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured. \" \"There", "rpc_client.delete_vlan_bulk(vlan_del_list) is True: LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else:", "self.rpc_backend) # Create a thread.for sync configuration to physical device. 
self.sync_helper = sync_helper.SyncHelper(self.leaf_topology,", "segment_id, segment_type) LOG.info(_(\"Create network postcommit end.\")) def update_network_precommit(self, context): pass def update_network_postcommit(self, context):", "vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list = device_config_list[dev_ip]['port_vlan'] rpc_client = self._get_client(dev_ip) if rpc_client is not", "vm_count) db.delete_vm(device_id, host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from", "from database.\"), network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id):", "host_id, port_id, network_id, tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical", "self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem'] == '': spine['oem'] =", "topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host)", "= port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner", "happen. 
LOG.info(_(\"Migration is begin.\")) segments = context.network.network_segments self.delete_port(old_host_id, port, segments) self.create_port_postcommit(context) LOG.info(_(\"Migration is", "self.sync_lock = None self.sync_timeout = int(cfg.CONF.ml2_hp.sync_time) self.username = cfg.CONF.ml2_hp.username self.password = cfg.CONF.ml2_hp.password self.url_schema", "leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host", "if leaf_ref_host[leaf_ip] is False and leaf_ip in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] =", "def update_port_postcommit(self, context): \"\"\"Just process the migration of virtual machine.\"\"\" port = context.current", "create vlan network: device %s, \" \"create vlan %s, port trunk list %s\"),", "not None: LOG.info(_(\"Begin create vlan network: device %s, \" \"create vlan %s, port", "if leaf_ip in leaf_need_configure: spine_vlan_list = list(leaf_ip_ref[leaf_ip]) if spine_ip not in device_config_dict: device_config_dict.setdefault(spine_ip,", "leaf_host in host_list: leaf_ip_ref.setdefault(leaf_ip, set([])) leaf_ip_ref[leaf_ip] |= set(db.get_vlanlist_byhost(leaf_host)) if leaf_host == host_id: leaf_ip_ref[leaf_ip]", "with HP switches. 
\"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology =", "%s\"), dev_ip, vlan_list, port_vlan_tuple_list) result = rpc_client.create_vlan_bulk(vlan_list) if result is True: result =", "device_owner.startswith('compute') and\\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when deleting port.\"), device_owner)", "|= set([vlan_id]) device_config_dict.setdefault(leaf_ip, {}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip)", "= [] if host in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) >", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "created, we do real operations in our physical device. \"\"\" LOG.info(_(\"Create network postcommit", "context): pass def delete_network_precommit(self, context): pass def delete_network_postcommit(self, context): \"\"\" Delete network information", "result is True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create", "in the same network and host. port_count = db.get_vm_count(network_id, host_id) if port_count ==", "in our physical devices. :param network_id. The uuid of network. :param host_id. 
The", "with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"The port %s of virtual", "spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list = list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {}", "create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in our", "self.default_oem nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in", "that connects to the same # device specified by ip address. leaf_ref_vlans =", "host number in same network # with leafs connected to spine. spine_delete_score =", "foreach leaf and spine device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username,", "dict is used to count the host number in same network # with", "leaf_ip = topology['leaf_ip'] if leaf_ip in leaf_ref_vlans: spine_delete_score.setdefault(spine_ip, 0) if leaf_ref_host[leaf_ip] is True:", "self.collect_create_config(network_id, host_id, vlan_id) # Execute configuration in physical devices. 
for dev_ip in device_config_list:", "True # If there is no host connects to leaf in the same", "port['tenant_id'] network_id = port['network_id'] old_host_id = db.get_vm_host(device_id, port_id, network_id, tenant_id) if old_host_id is", "LOG.info(_(\"Delete vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete", "network_id) db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict =", "with leaf %s.\"), str(leaf_need_configure)) # Find which spine device connects to the leaf", "= network['tenant_id'] if db.is_network_created(tenant_id, network_id): LOG.info(_(\"Delete network %s from database.\"), network_id) db.delete_network(tenant_id, network_id)", "set([])) leaf_ref_host.setdefault(leaf_ip, False) host = topology['host'] host_vlan = db.get_vlanlist_byhost(host) if host in host_list:", "virtual machine.\"\"\" port = context.current device_owner = port['device_owner'] LOG.info(_(\"Update port begin. Device owner", "network information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id = network['id']", "is True: LOG.info(_(\"Create vlan config successful for\" \" %s.\"), dev_ip) LOG.info(_(\"End create vlan", "to the same # device specified by ip address. leaf_ref_vlans = {} leaf_ref_host", "network_id): LOG.info(_(\"Create network with id %s.\"), network_id) # [{'segmentation_id': id, 'physical_network': value, #", "We need remove this spine's configuration. spine_generator = tools.topology_generator(self.spine_topology) # This dict is", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "LOG.info(_(\"Delete vlan host list %s\"), host_list) # It is the counter of host", "only process virtual machine and DHCP server's port. 
port = context.current device_owner =", "{}) device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's", "Execute configuration in physical devices. for dev_ip in device_config_list: vlan_list = device_config_list[dev_ip]['vlan_create'] port_vlan_tuple_list", "is no host connects to leaf in the same network, # we will", "nc_client = netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology:", "= port['tenant_id'] network_id = port['network_id'] with self.sync_lock: if db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id):", "deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port, segments) LOG.info(_(\"Delete port post-commit", "%s, url schema %s,\" \"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password,", "return device_id = port['device_id'] port_id = port['id'] tenant_id = port['tenant_id'] network_id = port['network_id']", "old_host_id == context.host: LOG.info(_(\"update port postcommit: No changed.\")) return # Migration is happen.", "with self.sync_lock: network_id = ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id =", "we will remove the configuration in the spine device. 
# And remove the", "network_id = ports['network_id'] device_id = ports['device_id'] port_id = ports['id'] tenant_id = ports['tenant_id'] if", "[]) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs with", "def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id, vlan_id): \"\"\"Do real configuration in", "Get the count of port that created in the same network and host.", "spine['oem'] == '': spine['oem'] = self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password)", "ports['id'] tenant_id = ports['tenant_id'] if not db.is_vm_created(device_id, host_id, port_id, network_id, tenant_id): LOG.info(_(\"No such", "VLANs configure with HP switches. \"\"\" def __init__(self, rpc=None): config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology", "\"\"\" Return a RPC client instance specified by device IP. \"\"\" client =", "specified by ip address. leaf_ref_vlans = {} leaf_ref_host = {} delete_config = {}", "import cfg from neutron.plugins.ml2 import driver_api from oslo_log import log as logging from", "# H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved. 
# # Licensed", "context.current device_owner = port['device_owner'] if not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore", "if host in host_list: host_list.remove(host) else: if len(set([vlan_id]) & set(host_vlan)) > 0: leaf_ref_host[leaf_ip]", "is %s in \" \"the configuration file.\"), str(device_ip)) return client def create_network_precommit(self, context):", "in spine_delete_score: if spine_delete_score[spine_ip] == 0 \\ and spine_ip in delete_config: delete_config[spine_ip]['vlan_del'] =", "device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip in device_config_dict: device_config_dict[leaf_ip]['port_vlan'].\\", "= port['device_id'] host_id = context.host port_id = port['id'] tenant_id = port['tenant_id'] network_id =", "address. leaf_ref_vlans = {} leaf_ref_host = {} delete_config = {} for leaf_ip, topology", "is end.\")) def collect_delete_config(self, network_id, host_id, vlan_id): vlan_list = db.get_vlanlist_byhost(host_id) if vlan_id in", "and spine device.\"\"\" for leaf in self.leaf_topology: if leaf['oem'] == '': leaf['oem'] =", "device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if leaf_ip", "not device_owner.startswith('compute') and \\ device_owner != n_const.DEVICE_OWNER_DHCP: LOG.info(_(\"Ignore port owner %s when creating", "& set(host_vlan)) > 0: leaf_ref_host[leaf_ip] = True # If there is no host", "context): pass def delete_network_postcommit(self, context): \"\"\" Delete network information from database.\"\"\" LOG.info(_(\"Delete network", "spine. 
spine_delete_score = {} for spine_ip, topology in spine_generator: leaf_ip = topology['leaf_ip'] if", "except in compliance with the License. # You may obtain a copy of", "Delete configuration in device # only if it is the last vm of", "information from database.\"\"\" LOG.info(_(\"Delete network begin.\")) network = context.current network_id = network['id'] tenant_id", "# And remove the vlan configuration in the leaf device. for leaf_ip in", "device_config_dict[leaf_ip].setdefault('port_vlan', []) device_config_dict[leaf_ip]['vlan_create'] = vlan_list device_config_dict[leaf_ip]['port_vlan'].\\ append((topology['ports'], vlan_list)) leaf_need_configure.append(leaf_ip) LOG.info(_(\"Starting collecting spine's configs", "0) if leaf_ref_host[leaf_ip] is True: spine_delete_score[spine_ip] += 1 if leaf_ip in delete_config: vlan_list", "H3C Technologies Co., Limited Copyright 2003-2015, All rights reserved. # # Licensed under", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "config.HPML2Config() self.leaf_topology = config.HPML2Config.leaf_topology self.spine_topology = config.HPML2Config.spine_topology self.sync_overlap = cfg.CONF.ml2_hp.sync_overlap self.sync_lock = None", "= tools.topology_generator(self.leaf_topology) leaf_ip_ref = {} for leaf_ip, topology in leaf_generator: leaf_host = topology['host']", "db.delete_network(tenant_id, network_id) LOG.info(_(\"Delete network end.\")) def collect_create_config(self, network_id, host_id, vlan_id): device_config_dict = {}", "# 'id': id, 'network_type': gre | vlan | vxlan }] segment_type = segments[0]['network_type']", "LOG.info(_(\"leaf %s, spine %s, user %s, pass %s, url schema %s,\" \"timeout %d,", "netconf_cfg.NetConfigClient(leaf['oem'], leaf['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(leaf['ip'], nc_client) for spine in self.spine_topology: if spine['oem']", "spine['oem'] 
= self.default_oem nc_client = netconf_cfg.NetConfigClient(spine['oem'], spine['ip'], self.url_schema, self.username, self.password) self.rpc_clients.setdefault(spine['ip'], nc_client) def", "tenant_id) def delete_port_postcommit(self, context): \"\"\"Delete real configuration from our physical devices.\"\"\" LOG.info(_(\"Delete port", "tenant_id) # Get the count of port that created in the same network", "list(leaf_ref_vlans[leaf_ip]) delete_config[spine_ip] = {} delete_config[spine_ip].setdefault('port_vlan', []) delete_config[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], vlan_list)) delete_config[spine_ip]['vlan_del'] = [] if", "port begin.\")) # Here we only process virtual machine and DHCP server's port.", "and DHCP server's port. port = context.current device_owner = port['device_owner'] if not device_owner.startswith('compute')", "configuration: %s\"), device_config_dict) return device_config_dict def create_port_precommit(self, context): pass def _create_vlan_network(self, network_id, host_id,", "when deleting port.\"), device_owner) return segments = context.network.network_segments self.delete_port(context.host, port, segments) LOG.info(_(\"Delete port", "\"already inserted into the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port %s's", "LOG.info(_(\"Insert port %s's information into database.\"), str(port_id)) db.create_vm(device_id, host_id, port_id, network_id, tenant_id) #", "LOG.info(_(\"Ignore port owner %s when update port.\"), device_owner) return device_id = port['device_id'] port_id", "we only process virtual machine and DHCP server's port. port = context.current device_owner", "0: delete_config[leaf_ip]['port_vlan'].\\ append((topology['leaf_ports'], vlan_list)) # Check does spine need to delete vlan. 
for", "= cfg.CONF.ml2_hp.rpc_backend.lower() self.sync_helper = None self.rpc_clients = {} def initialize(self): \"\"\" MechanismDriver will", "\" \"already inserted into the network %s.\"), str(port_id), str(device_id), str(network_id)) return LOG.info(_(\"Insert port", "device. \"\"\" LOG.info(_(\"Create network postcommit begin.\")) network = context.current network_id = network['id'] tenant_id", "vlan config %s success for %s.\"), port_vlan_tuple_list, dev_ip) else: LOG.warn(_(\"Failed to delete vlan", "whose IP is %s in \" \"the configuration file.\"), str(device_ip)) return client def", "Device owner is %s.\"), device_owner) if not (device_owner.startswith('compute') or device_owner == n_const.DEVICE_OWNER_DHCP): LOG.info(_(\"Ignore", "is created, we do real operations in our physical device. \"\"\" LOG.info(_(\"Create network", "host in this network vm_count = db.get_vm_count(network_id, host_id) if vm_count == 1: LOG.info(_(\"Delete", "\"timeout %d, rpc backend %s\"), self.leaf_topology, self.spine_topology, self.username, self.password, self.url_schema, self.sync_timeout, self.rpc_backend) #", "only if it is the last vm of host in this network vm_count", "in device_config_dict: device_config_dict.setdefault(spine_ip, {}) device_config_dict[spine_ip].setdefault('port_vlan', []) device_config_dict[spine_ip]['vlan_create'] = vlan_list device_config_dict[spine_ip]['port_vlan'].\\ append((topology['spine_ports'], spine_vlan_list)) if", "in delete_config: leaf_ref_vlans[leaf_ip] -= set([vlan_id]) delete_config[leaf_ip]['vlan_del'] = [vlan_id] # Check which spine device", "dev_ip) else: LOG.warn(_(\"Failed to port trunk %s for %s\"), port_vlan_tuple_list, dev_ip) def delete_port_precommit(self,", "rpc_client is not None: LOG.info(_(\"Begin create vlan network: device %s, \" \"create vlan", "= {} for leaf_ip, topology in leaf_generator: leaf_ref_vlans.setdefault(leaf_ip, set([])) leaf_ref_host.setdefault(leaf_ip, False) host =", "delete_config: 
delete_config[spine_ip]['vlan_del'] = [vlan_id] LOG.info(_(\"Delete configuration : %s\"), delete_config) return delete_config def delete_vlan_config(self,", "is used to count the host number in same network # with leafs", "leaf and spine device.\"\"\" for leaf in self.leaf_topology: rest_client = restful_cfg.RestfulCfg(leaf['ip'], self.username, self.password)", "network type %s\"), segment_type) else: LOG.info(_(\"Physical switch has already configured. \" \"There are" ]
[ "- zstop))) + (src_mag / 2) * (z * sp.exp1(sigma_a * z +", "as np import os import scipy.special as sp import sys MY_DIR = os.path.dirname(__file__)", "plt import numpy as np import os import scipy.special as sp import sys", "sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility import", "# noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return", "'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a", "def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a =", "of save plot') args = parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv)", "sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true',", "description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of save", "= os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from", "z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux", "sigma_a, zstop): EPS = 1e-25 return ((src_mag / sigma_a) - (src_mag / (2", "= deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop", "deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv is None: argv", "import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, 
sigma_a, zstop): EPS =", "deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def", "import argparse import matplotlib.pyplot as plt import numpy as np import os import", "sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic')", "show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag =", "(src_mag / 2) * (z * sp.exp1(sigma_a * z + EPS) + (zstop", "plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck)", "src_mag, sigma_a, zstop def parse_args(argv): if argv is None: argv = sys.argv[1:] parser", "matplotlib.pyplot as plt import numpy as np import os import scipy.special as sp", "coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh", "+ EPS) + (zstop - z) * sp.exp1(sigma_a * (zstop - z) +", "+ (src_mag / 2) * (z * sp.exp1(sigma_a * z + EPS) +", "* (z - zstop))) + (src_mag / 2) * (z * sp.exp1(sigma_a *", "z + EPS) + (zstop - z) * sp.exp1(sigma_a * (zstop - z)", "deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv", "scipy.special as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input,", "plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a,", "#!/usr/bin/env python3 import argparse import matplotlib.pyplot as plt import numpy as np import", "plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') 
show_or_save(show, problem, 'analytic_flux') def", "(z - zstop))) + (src_mag / 2) * (z * sp.exp1(sigma_a * z", "* sp.exp1(sigma_a * (zstop - z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag,", "solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag / sigma_a) - (src_mag", "'--show', action='store_true', help='show instead of save plot') args = parser.parse_args(argv) return args def", "* (np.exp(-sigma_a * z) + np.exp(sigma_a * (z - zstop))) + (src_mag /", "# noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def", "flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of save plot') args", "return ((src_mag / sigma_a) - (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a *", "zstop): EPS = 1e-25 return ((src_mag / sigma_a) - (src_mag / (2 *", "((src_mag / sigma_a) - (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z)", "plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh =", "mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return", "action='store_true', help='show instead of save plot') args = parser.parse_args(argv) return args def main(argv=None):", "- (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a *", "def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag,", "save plot') args = parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show,", "import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa:", "= 1e-25 return ((src_mag / sigma_a) - (src_mag / (2 * sigma_a)) *", 
"sigma_a) - (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a", "z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z,", "(src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a * (z", "(2 * sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a * (z - zstop)))", "= solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem}", "= get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z", "parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end", "E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag /", "instead of save plot') args = parser.parse_args(argv) return args def main(argv=None): args =", "help='show instead of save plot') args = parser.parse_args(argv) return args def main(argv=None): args", "plot') args = parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show, f'{MY_DIR}/full_slab')", "if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux", "/ (2 * sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a * (z -", "2) * (z * sp.exp1(sigma_a * z + EPS) + (zstop - z)", "sp.exp1(sigma_a * z + EPS) + (zstop - z) * sp.exp1(sigma_a * (zstop", "label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck", "flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag", "EPS) + (zstop - z) * sp.exp1(sigma_a * (zstop - 
z) + EPS)))", "as plt import numpy as np import os import scipy.special as sp import", "* z) + np.exp(sigma_a * (z - zstop))) + (src_mag / 2) *", "z) + np.exp(sigma_a * (z - zstop))) + (src_mag / 2) * (z", "(zstop - z) * sp.exp1(sigma_a * (zstop - z) + EPS))) def plot_analytic_flux(show,", "parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show, f'{MY_DIR}/full_slab') if __name__ ==", "args = parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show, f'{MY_DIR}/full_slab') if", "sigma_a, zstop def parse_args(argv): if argv is None: argv = sys.argv[1:] parser =", "src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag / sigma_a) - (src_mag /", "formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of save plot') args = parser.parse_args(argv) return", "E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag,", "sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh #", "= argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead", "np import os import scipy.special as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..')", "* z + EPS) + (zstop - z) * sp.exp1(sigma_a * (zstop -", "src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a,", "= deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if", "plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag / sigma_a)", 
"np.exp(sigma_a * (z - zstop))) + (src_mag / 2) * (z * sp.exp1(sigma_a", "argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s',", "parser.add_argument('-s', '--show', action='store_true', help='show instead of save plot') args = parser.parse_args(argv) return args", "args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show, f'{MY_DIR}/full_slab') if __name__ == '__main__': main()", "as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh", "argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of", "narrows import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save #", "plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml'])", "sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv):", "create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge, src_mag,", "MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}')", "sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem,", "/ 2) * (z * sp.exp1(sigma_a * z + EPS) + (zstop -", "analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', 
action='store_true', help='show instead of save plot')", "- z) * sp.exp1(sigma_a * (zstop - z) + EPS))) def plot_analytic_flux(show, problem):", "+ (zstop - z) * sp.exp1(sigma_a * (zstop - z) + EPS))) def", "is None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml',", "zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux')", "numpy as np import os import scipy.special as sp import sys MY_DIR =", "mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv is None: argv = sys.argv[1:]", "parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show", "noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag", "os import scipy.special as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows", "def parse_args(argv): if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot", "(np.exp(-sigma_a * z) + np.exp(sigma_a * (z - zstop))) + (src_mag / 2)", "os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility", "* (z * sp.exp1(sigma_a * z + EPS) + (zstop - z) *", "EPS = 1e-25 return ((src_mag / sigma_a) - (src_mag / (2 * sigma_a))", "utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS", "return args def main(argv=None): args = parse_args(argv) plot_analytic_flux(args.show, f'{MY_DIR}/full_slab') if __name__ == '__main__':", "flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z 
coordinate') plt.ylabel(r'$\\phi(z)$')", "parse_args(argv): if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic", "= create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop = deck.reg['reg1'].end return mesh.edge,", "flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem):", "- z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem)", "(z * sp.exp1(sigma_a * z + EPS) + (zstop - z) * sp.exp1(sigma_a", "sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a,", "zstop def parse_args(argv): if argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser(", "/ sigma_a) - (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a * z) +", "python3 import argparse import matplotlib.pyplot as plt import numpy as np import os", "import os import scipy.special as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from", "zstop))) + (src_mag / 2) * (z * sp.exp1(sigma_a * z + EPS)", "src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show,", "plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux') show_or_save(show, problem, 'analytic_flux') def get_parameters_for(problem): deck =", "None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter)", "<reponame>pozulp/narrows<gh_stars>1-10 #!/usr/bin/env python3 import argparse import matplotlib.pyplot as plt import numpy as np", "from narrows import parse_input, create_mesh # noqa: E402 
sys.path.append(f'{MY_DIR}') from utility import show_or_save", "argv is None: argv = sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for", "+ EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux =", "return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv is None: argv =", "for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of save plot') args =", "zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend()", "import numpy as np import os import scipy.special as sp import sys MY_DIR", "EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z,", "z) * sp.exp1(sigma_a * (zstop - z) + EPS))) def plot_analytic_flux(show, problem): z,", "create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle')", "get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate')", "def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25 return ((src_mag / sigma_a) -", "problem): z, src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop)", "1e-25 return ((src_mag / sigma_a) - (src_mag / (2 * sigma_a)) * (np.exp(-sigma_a", "sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a * (z - zstop))) + (src_mag", "deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop", "= sys.argv[1:] parser = argparse.ArgumentParser( description='Plot analytic flux for full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show',", "= deck.reg['reg1'].end return 
mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv is None:", "* sp.exp1(sigma_a * z + EPS) + (zstop - z) * sp.exp1(sigma_a *", "import matplotlib.pyplot as plt import numpy as np import os import scipy.special as", "* (zstop - z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop", "get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a", "show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop): EPS = 1e-25", "solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux, label='analytic') plt.legend() plt.xlabel('z coordinate') plt.ylabel(r'$\\phi(z)$') plt.title(f'{problem} flux')", "+ np.exp(sigma_a * (z - zstop))) + (src_mag / 2) * (z *", "import scipy.special as sp import sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import", "noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z,", "full_slab.yaml', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-s', '--show', action='store_true', help='show instead of save plot') args = parser.parse_args(argv)", "from utility import show_or_save # noqa: E402 plt.style.use(f'{MY_DIR}/style.mplstyle') def solution(z, src_mag, sigma_a, zstop):", "import parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa:", "src_mag, sigma_a, zstop = get_parameters_for(problem) flux = solution(z, src_mag, sigma_a, zstop) plt.plot(z, flux,", "(zstop - z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a, zstop =", "= parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude sigma_a = deck.mat['mat1'].sigma_a zstop =", "= parser.parse_args(argv) return args def main(argv=None): args = parse_args(argv) 
plot_analytic_flux(args.show, f'{MY_DIR}/full_slab') if __name__", "parse_input, create_mesh # noqa: E402 sys.path.append(f'{MY_DIR}') from utility import show_or_save # noqa: E402", "* sigma_a)) * (np.exp(-sigma_a * z) + np.exp(sigma_a * (z - zstop))) +", "sys MY_DIR = os.path.dirname(__file__) sys.path.append(f'{MY_DIR}/..') from narrows import parse_input, create_mesh # noqa: E402", "problem, 'analytic_flux') def get_parameters_for(problem): deck = parse_input([f'{problem}.yaml']) mesh = create_mesh(deck) src_mag = deck.src['src1'].magnitude", "argparse import matplotlib.pyplot as plt import numpy as np import os import scipy.special", "zstop = deck.reg['reg1'].end return mesh.edge, src_mag, sigma_a, zstop def parse_args(argv): if argv is", "sp.exp1(sigma_a * (zstop - z) + EPS))) def plot_analytic_flux(show, problem): z, src_mag, sigma_a," ]
[ "Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth,", "as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else: print('ERROR: unable to", "WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import Device if __name__ == '__main__':", "gps_tracker.device import Device if __name__ == '__main__': auth = Auth() device = Device('WaveShare", "Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled", "import Device if __name__ == '__main__': auth = Auth() device = Device('WaveShare Device',", "auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled =", "serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps,", "from gps_tracker.device import Device if __name__ == '__main__': auth = Auth() device =", "main from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator", "from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from", "'__main__': auth = Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn:", "from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import Device if", "from gps_tracker.runner import main from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from", "Device('WaveShare Device', auth) with 
SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps:", "SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth", "gps_tracker.auth import Auth from gps_tracker.device import Device if __name__ == '__main__': auth =", "auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else: print('ERROR:", "__name__ == '__main__': auth = Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE)", "with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device)", "import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import", "Device if __name__ == '__main__': auth = Auth() device = Device('WaveShare Device', auth)", "import Auth from gps_tracker.device import Device if __name__ == '__main__': auth = Auth()", "device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn)", "as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled:", "enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else: print('ERROR: unable to initialize GPS.')", "gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import Device if __name__", "auth = Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with", "with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps()", 
"gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth", "WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import Device", "Auth from gps_tracker.device import Device if __name__ == '__main__': auth = Auth() device", "= Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id,", "SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if", "serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else: print('ERROR: unable", "ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else: print('ERROR: unable to initialize", "= Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as serial_conn: with WaveShareGPSLocator(device.id, auth, serial_conn) as", "WaveShareGPSLocator(device.id, auth, serial_conn) as ws_gps: enabled = ws_gps.wait_for_gps() if enabled: main(ws_gps, device) else:", "import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import", "from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from", "gps_tracker.runner import main from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator", "if __name__ == '__main__': auth = Auth() device = Device('WaveShare Device', auth) with", 
"<reponame>cezaryzelisko/gps-tracker<filename>examples/wave_share_gps_locator_runner.py from gps_tracker.runner import main from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS", "== '__main__': auth = Auth() device = Device('WaveShare Device', auth) with SerialConnection(WaveShareGPS.DEFAULT_DEVICE) as", "import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device import Device if __name__ ==", "import main from gps_tracker.serial_connection import SerialConnection from gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import", "gps_tracker.wave_share_config import WaveShareGPS from gps_tracker.wave_share_gps_locator import WaveShareGPSLocator from gps_tracker.auth import Auth from gps_tracker.device", "from gps_tracker.auth import Auth from gps_tracker.device import Device if __name__ == '__main__': auth" ]
[ "def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines =", "= lines[0].strip() password = lines[1].strip() s = Session() s.login(username, password) return s def", "as f: lines = f.readlines() username = lines[0].strip() password = lines[1].strip() s =", "os from pyfortune.session import Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path,", "with open(secrets_path, 'r') as f: lines = f.readlines() username = lines[0].strip() password =", "Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines", "= Session() s.login(username, password) return s def get_session(): s = Session() return s", "= os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines = f.readlines() username =", "get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines = f.readlines()", "os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines = f.readlines() username = lines[0].strip()", "'secrets') with open(secrets_path, 'r') as f: lines = f.readlines() username = lines[0].strip() password", "username = lines[0].strip() password = lines[1].strip() s = Session() s.login(username, password) return s", "<gh_stars>0 import os from pyfortune.session import Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets')", "= f.readlines() username = lines[0].strip() password = lines[1].strip() s = Session() s.login(username, password)", "lines = f.readlines() username = lines[0].strip() password = lines[1].strip() s = Session() s.login(username,", "open(secrets_path, 'r') as f: lines = f.readlines() username = lines[0].strip() password = lines[1].strip()", "f: lines = f.readlines() username = lines[0].strip() password = lines[1].strip() s = Session()", "from pyfortune.session import Session def 
get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r')", "'r') as f: lines = f.readlines() username = lines[0].strip() password = lines[1].strip() s", "f.readlines() username = lines[0].strip() password = lines[1].strip() s = Session() s.login(username, password) return", "lines[0].strip() password = lines[1].strip() s = Session() s.login(username, password) return s def get_session():", "= lines[1].strip() s = Session() s.login(username, password) return s def get_session(): s =", "lines[1].strip() s = Session() s.login(username, password) return s def get_session(): s = Session()", "import Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f:", "password = lines[1].strip() s = Session() s.login(username, password) return s def get_session(): s", "s = Session() s.login(username, password) return s def get_session(): s = Session() return", "pyfortune.session import Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as", "import os from pyfortune.session import Session def get_login_session(): secrets_path = os.environ.get('SECRETS', 'secrets') with", "secrets_path = os.environ.get('SECRETS', 'secrets') with open(secrets_path, 'r') as f: lines = f.readlines() username" ]
[ "{ \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray = {", "\"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return", "word == nigga + \"true\\n\": return True elif word == nigga + \"false\\n\":", "соответствует true или false\", \"Проверьте ваш файл settings.txt\") # если такая настройка не", "\"requests-counter\": \"true\" } numArray = { \"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName):", "+ \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile()", "\"\\\" не соответствует true или false\", \"Проверьте ваш файл settings.txt\") # если такая", "= settingName + \" = \" if word.startswith(nigga): file.close() if word == nigga", "# проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку for word", "file.close() if word == nigga + \"true\\n\": return True elif word == nigga", "file = open(\"settings.txt\", \"r+\") # ищем настройку for word in file: nigga =", "if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: # если значение не число", "helper import utils.system as sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\":", "\\\"\" + settingName + \"\\\" не является числом\") # если такая настройка не", "except: # если значение не число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не", "\\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName])", "если такая настройка не найдена в файле print(\"Не удалось найти значение \\\"\" +", "file.close() return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName)", "найдена print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! 
Записываем...\") file.write(settingName +", "Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName)", "+ \" = \" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError:", "return False # если значение не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\"", "settingName + \" = \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except:", "open(\"settings.txt\", \"r+\") # ищем настройку for word in file: nigga = settingName +", "значение не число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является числом\") #", "def getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку", "\"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray = { \"delay\": 1, \"ping-delay\": 5", "false\", \"Проверьте ваш файл settings.txt\") # если такая настройка не найдена в файле", "Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName)", "sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" }", "file: nigga = settingName + \" = \" if word.startswith(nigga): file.close() if word", "False # если значение не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не", "sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является числом\") # если такая настройка", "удалось найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName + \" =", "as helper import utils.system as sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\",", "\" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: # если значение не", "найдена в файле print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! 
Записываем...\")", "int(word.replace(nigga, \"\")) except: # если значение не число sys.crash(\"Настрйка \\\"\" + settingName +", "значение не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует true или", "# если такая настройка не найдена в файле print(\"Не удалось найти значение \\\"\"", "\" = \" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: #", "= settingName + \" = \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\"))", "\" = \" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: # если", "as sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\"", "if word == nigga + \"true\\n\": return True elif word == nigga +", "+ \"\\n\") file.close() return getNum(settingName) except: # если файла settings.txt нет helper.createSettingsFile() return", "\"r+\") # ищем настройку for word in file: nigga = settingName + \"", "ваш файл settings.txt\") # если такая настройка не найдена в файле print(\"Не удалось", "helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку for word in file:", "\"\\\"! 
Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName]) + \"\\n\") file.close() return", "file: nigga = settingName + \" = \" if word.startswith(nigga): file.close() try: return", "если такая настройка не найдена print(\"Не удалось найти значение \\\"\" + settingName +", "booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray", "booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет", "== nigga + \"true\\n\": return True elif word == nigga + \"false\\n\": return", "\"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray = { \"delay\":", "+ \"false\\n\": return False # если значение не булевое sys.crash(\"Настройка \\\"\" + settingName", "= \" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: # если файла", "= \" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если", "\"time\": \"false\", \"requests-counter\": \"true\" } numArray = { \"delay\": 1, \"ping-delay\": 5 }", "= open(\"settings.txt\", \"r+\") # ищем настройку for word in file: nigga = settingName", "# -*- coding: utf-8 -*- import utils.settingsHelper as helper import utils.system as sys", "nigga = settingName + \" = \" if word.startswith(nigga): file.close() if word ==", "return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") #", "+ \"\\\"! 
Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName] + \"\\n\") file.close()", "\"false\\n\": return False # если значение не булевое sys.crash(\"Настройка \\\"\" + settingName +", "settingName + \"\\\" не соответствует true или false\", \"Проверьте ваш файл settings.txt\") #", "настройка не найдена в файле print(\"Не удалось найти значение \\\"\" + settingName +", "+ settingName + \"\\\" не соответствует true или false\", \"Проверьте ваш файл settings.txt\")", "utils.settingsHelper as helper import utils.system as sys booleanArray = { \"autoclick\": \"false\", \"ping\":", "файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file", "\"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName) try: file =", "settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName]) + \"\\n\")", "utf-8 -*- import utils.settingsHelper as helper import utils.system as sys booleanArray = {", "word in file: nigga = settingName + \" = \" if word.startswith(nigga): file.close()", "\"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray = { \"delay\": 1,", "word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: # если значение не число sys.crash(\"Настрйка", "проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку for word in", "in file: nigga = settingName + \" = \" if word.startswith(nigga): file.close() if", "\"false\", \"requests-counter\": \"true\" } numArray = { \"delay\": 1, \"ping-delay\": 5 } def", "\"true\" } numArray = { \"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName): #", "settingName + \" = \" if word.startswith(nigga): file.close() if word == nigga +", "return True elif word == nigga + \"false\\n\": return False # если значение", "= { \"delay\": 1, 
\"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName) try:", "нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\",", "является числом\") # если такая настройка не найдена print(\"Не удалось найти значение \\\"\"", "nigga + \"false\\n\": return False # если значение не булевое sys.crash(\"Настройка \\\"\" +", "не найдена print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName", "settings.txt\") # если такая настройка не найдена в файле print(\"Не удалось найти значение", "число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является числом\") # если такая", "значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" +", "числом\") # если такая настройка не найдена print(\"Не удалось найти значение \\\"\" +", "+ str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: # если файла settings.txt нет", "= \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: # если значение", "word == nigga + \"false\\n\": return False # если значение не булевое sys.crash(\"Настройка", "настройка не найдена print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! 
Записываем...\")", "-*- import utils.settingsHelper as helper import utils.system as sys booleanArray = { \"autoclick\":", "1, \"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\",", "# если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName)", "такая настройка не найдена в файле print(\"Не удалось найти значение \\\"\" + settingName", "# если значение не число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является", "in file: nigga = settingName + \" = \" if word.startswith(nigga): file.close() try:", "def getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку", "\\\"\" + settingName + \"\\\" не соответствует true или false\", \"Проверьте ваш файл", "5 } def getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") #", "} numArray = { \"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName): # проверяем", "import utils.system as sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\",", "найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \"", "getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку for", "\" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: # если файла settings.txt", "\"\")) except: # если значение не число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\"", "+ \"\\\"! 
Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName]) + \"\\n\") file.close()", "если значение не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует true", "try: file = open(\"settings.txt\", \"r+\") # ищем настройку for word in file: nigga", "getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName):", "\"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\")", "file.close() try: return int(word.replace(nigga, \"\")) except: # если значение не число sys.crash(\"Настрйка \\\"\"", "file.write(settingName + \" = \" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except", "} def getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем", "settingName + \"\\\" не является числом\") # если такая настройка не найдена print(\"Не", "sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует true или false\", \"Проверьте ваш", "если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try:", "settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file =", "+ \"\\\" не соответствует true или false\", \"Проверьте ваш файл settings.txt\") # если", "nigga = settingName + \" = \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga,", "+ \"\\\" не является числом\") # если такая настройка не найдена print(\"Не удалось", "\" if word.startswith(nigga): file.close() if word == nigga + \"true\\n\": return True elif", "\"\\\" не является числом\") # если такая настройка не найдена print(\"Не удалось найти", "elif word == nigga + \"false\\n\": return False # если 
значение не булевое", "str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: # если файла settings.txt нет helper.createSettingsFile()", "такая настройка не найдена print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"!", "FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем", "не соответствует true или false\", \"Проверьте ваш файл settings.txt\") # если такая настройка", "== nigga + \"false\\n\": return False # если значение не булевое sys.crash(\"Настройка \\\"\"", "булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует true или false\", \"Проверьте", "файл settings.txt\") # если такая настройка не найдена в файле print(\"Не удалось найти", "for word in file: nigga = settingName + \" = \" if word.startswith(nigga):", "+ settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName] +", "# ищем настройку for word in file: nigga = settingName + \" =", "= { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray =", "{ \"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName) try: file", "import utils.settingsHelper as helper import utils.system as sys booleanArray = { \"autoclick\": \"false\",", "getBoolean(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем настройку for", "не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует true или false\",", "\" + booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если файла", "True elif word == nigga + \"false\\n\": return False # если значение не", "getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\") # ищем", "не число 
sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является числом\") # если", "файле print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName +", "true или false\", \"Проверьте ваш файл settings.txt\") # если такая настройка не найдена", "\"\\n\") file.close() return getNum(settingName) except: # если файла settings.txt нет helper.createSettingsFile() return getNum(settingName)", "nigga + \"true\\n\": return True elif word == nigga + \"false\\n\": return False", "не найдена в файле print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"!", "= \" if word.startswith(nigga): file.close() if word == nigga + \"true\\n\": return True", "\"Проверьте ваш файл settings.txt\") # если такая настройка не найдена в файле print(\"Не", "+ settingName + \"\\\" не является числом\") # если такая настройка не найдена", "+ \" = \" if word.startswith(nigga): file.close() if word == nigga + \"true\\n\":", "+ \" = \" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except: #", "\"\\\"! Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName] + \"\\n\") file.close() return", "# если такая настройка не найдена print(\"Не удалось найти значение \\\"\" + settingName", "print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName + \"", "не является числом\") # если такая настройка не найдена print(\"Не удалось найти значение", "\\\"\" + settingName + \"\\\"! 
Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName]", "utils.system as sys booleanArray = { \"autoclick\": \"false\", \"ping\": \"true\", \"time\": \"false\", \"requests-counter\":", "или false\", \"Проверьте ваш файл settings.txt\") # если такая настройка не найдена в", "\" = \" if word.startswith(nigga): file.close() if word == nigga + \"true\\n\": return", "ищем настройку for word in file: nigga = settingName + \" = \"", "настройку for word in file: nigga = settingName + \" = \" if", "в файле print(\"Не удалось найти значение \\\"\" + settingName + \"\\\"! Записываем...\") file.write(settingName", "except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): #", "-*- coding: utf-8 -*- import utils.settingsHelper as helper import utils.system as sys booleanArray", "# если значение не булевое sys.crash(\"Настройка \\\"\" + settingName + \"\\\" не соответствует", "если значение не число sys.crash(\"Настрйка \\\"\" + settingName + \"\\\" не является числом\")", "helper.createSettingsFile() return getBoolean(settingName) def getNum(settingName): # проверяем helper.check(settingName) try: file = open(\"settings.txt\", \"r+\")", "file.write(settingName + \" = \" + str(numArray[settingName]) + \"\\n\") file.close() return getNum(settingName) except:", "try: return int(word.replace(nigga, \"\")) except: # если значение не число sys.crash(\"Настрйка \\\"\" +", "word.startswith(nigga): file.close() if word == nigga + \"true\\n\": return True elif word ==", "+ \"true\\n\": return True elif word == nigga + \"false\\n\": return False #", "+ \" = \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: #", "+ booleanArray[settingName] + \"\\n\") file.close() return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt", "\"ping\": \"true\", \"time\": \"false\", \"requests-counter\": \"true\" } numArray = { 
\"delay\": 1, \"ping-delay\":", "return int(word.replace(nigga, \"\")) except: # если значение не число sys.crash(\"Настрйка \\\"\" + settingName", "if word.startswith(nigga): file.close() if word == nigga + \"true\\n\": return True elif word", "settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" + booleanArray[settingName] + \"\\n\")", "<filename>jewtrick/utils/settings.py # -*- coding: utf-8 -*- import utils.settingsHelper as helper import utils.system as", "numArray = { \"delay\": 1, \"ping-delay\": 5 } def getBoolean(settingName): # проверяем helper.check(settingName)", "return getBoolean(settingName) except FileNotFoundError: # если файла settings.txt нет helper.createSettingsFile() return getBoolean(settingName) def", "+ settingName + \"\\\"! Записываем...\") file.write(settingName + \" = \" + str(numArray[settingName]) +", "\"true\\n\": return True elif word == nigga + \"false\\n\": return False # если", "coding: utf-8 -*- import utils.settingsHelper as helper import utils.system as sys booleanArray =", "\" = \" if word.startswith(nigga): file.close() try: return int(word.replace(nigga, \"\")) except: # если" ]
[ "setuptools import setup, find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios',", "from setuptools import setup, find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(),", "find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI,", "setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', description='Interact with e.sios", "['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', description='Interact with", "PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.',", "import setup, find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT',", "<reponame>gisce/esios from setuptools import setup, find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2',", "= {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>',", "packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', 
description='Interact with e.sios API', package_data=PACKAGES_DATA, )", "version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', description='Interact with e.sios API', package_data=PACKAGES_DATA,", "name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', description='Interact with e.sios API',", "{'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas'], author='GISCE-TI, S.L.', author_email='<EMAIL>', description='Interact", "setup, find_packages PACKAGES_DATA = {'esios': ['data/*.xsd']} setup( name='esios', version='0.12.2', packages=find_packages(), url='https://github.com/gisce/esios', license='MIT', install_requires=['libsaas']," ]
[ "packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random", "setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents:", "setup setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA", "project of QARMA non-permanents: deforesting random forest using OMP.', author='QARMA team', license='MIT', )", "\"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random forest using OMP.', author='QARMA", "setuptools import find_packages, setup setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro", "import find_packages, setup setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project", "version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random forest using OMP.', author='QARMA team',", "<filename>setup.py<gh_stars>1-10 from setuptools import find_packages, setup setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"},", "description='Bolsonaro project of QARMA non-permanents: deforesting random forest using OMP.', author='QARMA team', license='MIT',", "exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random forest", "package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random forest using OMP.',", "find_packages, setup 
setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of", "from setuptools import find_packages, setup setup( name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0',", "'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting random forest using", "name='bolsonaro', packages=find_packages(where=\"code\", exclude=['doc', 'dev']), package_dir={'': \"code\"}, version='0.1.0', description='Bolsonaro project of QARMA non-permanents: deforesting" ]
[ "and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields =", "Group Template` SET template_or_new_line = 'Add New Line' WHERE template_or_new_line = 'Add new", "rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): #", "import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'):", "'Lab Test Groups': 'Lab Test Group Template', 'Normal Test Items': 'Normal Test Result',", "from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab", "'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive", "frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = {", "Test Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test", "frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template':", "'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' }", "Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test Result',", "'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and", "Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive 
Test Result': 'descriptive_test_items' } for", "if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes", "%(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test',", "parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test',", "frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test", "doctypes doctypes = { 'Lab Test Groups': 'Lab Test Group Template', 'Normal Test", "Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'): #", "'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'): # fix", "Result', 'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype',", "from __future__ import unicode_literals import frappe from frappe.model.utils.rename_field import rename_field def execute(): if", "rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'): # fix select", "import unicode_literals import frappe from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab", "Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype,", "force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group Template':", "{ 'Lab Test Groups': 'Lab 
Test Group Template', 'Normal Test Items': 'Normal Test", "Test Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET", "\"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'):", "Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype',", "frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes doctypes", "Test Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare',", "UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare',", "'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield in", "# rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle',", "'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType',", "Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt,", "if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType',", "frappe.db.exists('DocType', 'Lab Test Group Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template')", "'lab_test') if 
frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test", "if frappe.db.exists('DocType', 'Lab Test Group Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype',", "'Descriptive Test Result', 'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test')", "Test Result', 'Special Test Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive Test", "'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line = 'Add New", "Template'): # rename child doctypes doctypes = { 'Lab Test Groups': 'Lab Test", "child doctypes doctypes = { 'Lab Test Groups': 'Lab Test Group Template', 'Normal", "frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'):", "'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive", "Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test", "Test Template'): # rename child doctypes doctypes = { 'Lab Test Groups': 'Lab", "Group Template', 'Normal Test Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test", "Test Group Template', 'Normal Test Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity", "new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group", "Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result':", "'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test 
Group Template'): # fix select field option", "field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line", "'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt)", "'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE", "frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line = 'Add New Line' WHERE", "'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test Result', 'Special", "Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test", "parent_fields = { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal", "frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if not", "old_dt) parent_fields = { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates',", "frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test", "{ 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result':", "Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items():", "and frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes doctypes = { 'Lab", "doctypes = { 'Lab Test Groups': 'Lab Test Group Template', 'Normal Test Items':", "fix select field option frappe.reload_doc('healthcare', 'doctype', 
'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template`", "} frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if", "Test Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test Result', 'Special Test", "'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items'", "for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt,", "def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename", "frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes doctypes = { 'Lab Test", "`tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype',", "'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items',", "if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group", "old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True)", "Test Result', 'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare',", "= { 'Lab Test Groups': 'Lab Test Group Template', 'Normal Test Items': 'Normal", "rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 
'special_toggle', 'descriptive_toggle')", "'Normal Test Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special", "Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\"", "Template', 'Normal Test Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result',", "Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result':", "doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield':", "Test Group Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE", "'Special Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template')", "'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive", "option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line =", "Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test", "Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test", "Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 'Sensitivity", "'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes doctypes =", "doctypes.items(): if not frappe.db.table_exists(new_dt) and 
frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt))", "in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype',", "# rename child doctypes doctypes = { 'Lab Test Groups': 'Lab Test Group", "Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items():", "'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab", "for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype),", "execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename child", "'Lab Test Group Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\"", "parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield})", "Test Template': 'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for", "Test Group Template` SET template_or_new_line = 'Add New Line' WHERE template_or_new_line = 'Add", "unicode_literals import frappe from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test')", "parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if", "'Lab Test Group Template', 'Normal Test 
Items': 'Normal Test Result', 'Sensitivity Test Items':", "import frappe from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and", "<reponame>Mindhome/field_service<gh_stars>1-10 from __future__ import unicode_literals import frappe from frappe.model.utils.rename_field import rename_field def execute():", "= { 'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test", "'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield =", "Template` SET template_or_new_line = 'Add New Line' WHERE template_or_new_line = 'Add new line'", "Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'): # fix select field", "frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if frappe.db.exists('DocType',", "'Special Test Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive Test Template' }", "not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt)", "SET template_or_new_line = 'Add New Line' WHERE template_or_new_line = 'Add new line' \"\"\")", "frappe from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType', 'Lab Test') and frappe.db.exists('DocType',", "'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line = 'Add New Line'", "'Descriptive Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt", 
"frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields", "Group Template'): # fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab", "= %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab", "# fix select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group", "__future__ import unicode_literals import frappe from frappe.model.utils.rename_field import rename_field def execute(): if frappe.db.exists('DocType',", "frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab", "'Lab Test Template'): # rename child doctypes doctypes = { 'Lab Test Groups':", "} for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s", "old_dt, new_dt, force=True) frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test", "parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename", "'descriptive_toggle') if frappe.db.exists('DocType', 'Lab Test Group Template'): # fix select field option frappe.reload_doc('healthcare',", "select field option frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET", "'Lab Test Group Template': 'lab_test_groups', 'Descriptive Test Template': 
'descriptive_test_templates', 'Normal Test Result': 'normal_test_items',", "rename child doctypes doctypes = { 'Lab Test Groups': 'Lab Test Group Template',", "{'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab", "new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt, new_dt, force=True) frappe.reload_doc('healthcare',", "field frappe.reload_doc('healthcare', 'doctype', 'lab_test') if frappe.db.has_column('Lab Test', 'special_toggle'): rename_field('Lab Test', 'special_toggle', 'descriptive_toggle') if", "frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field", "in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) #", "frappe.reload_doc('healthcare', 'doctype', frappe.scrub(new_dt)) frappe.delete_doc_if_exists('DocType', old_dt) parent_fields = { 'Lab Test Group Template': 'lab_test_groups',", "'lab_test_template') for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt): frappe.rename_doc('DocType', old_dt,", "frappe.reload_doc('healthcare', 'doctype', 'lab_test_group_template') frappe.db.sql(\"\"\" UPDATE `tabLab Test Group Template` SET template_or_new_line = 'Add", "Test') and frappe.db.exists('DocType', 'Lab Test Template'): # rename child doctypes doctypes = {", "Groups': 'Lab Test Group Template', 'Normal Test Items': 'Normal Test Result', 'Sensitivity Test", "'normal_test_items', 'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield", "'Descriptive Test Template': 'descriptive_test_templates', 'Normal Test Result': 'normal_test_items', 
'Sensitivity Test Result': 'sensitivity_test_items', 'Descriptive", "SET parentfield = %(parentfield)s \"\"\".format(doctype), {'parentfield': parentfield}) # rename field frappe.reload_doc('healthcare', 'doctype', 'lab_test')", "UPDATE `tabLab Test Group Template` SET template_or_new_line = 'Add New Line' WHERE template_or_new_line", "Test Groups': 'Lab Test Group Template', 'Normal Test Items': 'Normal Test Result', 'Sensitivity", "Result', 'Special Test Items': 'Descriptive Test Result', 'Special Test Template': 'Descriptive Test Template'", "'Descriptive Test Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}`", "`tabLab Test Group Template` SET template_or_new_line = 'Add New Line' WHERE template_or_new_line =", "Items': 'Sensitivity Test Result', 'Special Test Items': 'Descriptive Test Result', 'Special Test Template':", "Test Template' } frappe.reload_doc('healthcare', 'doctype', 'lab_test') frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in", "Items': 'Normal Test Result', 'Sensitivity Test Items': 'Sensitivity Test Result', 'Special Test Items':", "Result': 'descriptive_test_items' } for doctype, parentfield in parent_fields.items(): frappe.db.sql(\"\"\" UPDATE `tab{0}` SET parentfield", "frappe.reload_doc('healthcare', 'doctype', 'lab_test_template') for old_dt, new_dt in doctypes.items(): if not frappe.db.table_exists(new_dt) and frappe.db.table_exists(old_dt):" ]
[ "# Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request URL", "KIND, either express or implied. # See the License for the specific language", "200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url", "Unless required by applicable law or agreed to in writing, software # distributed", "# Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms of", "Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms of any", "utf-8 -*- # (C) Copyright IBM Corp. 2020. 
# # Licensed under the", "json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances", "by calling from_dict on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model !=", "model instance back to dict and verify no loss of data security_level_setting_resp_model_json2 =", "a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model", "of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2", "#----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response", "class TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response will", "security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #--------------------------------------------------------", "get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock", "test_set_security_level_setting_required_params() 
#-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url +", "= 1 result_info_model_json['total_count'] = 200 # Construct a model instance of ResultInfo by", "= ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent assert result_info_model == result_info_model2", "dict and verify no loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 ==", "if key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError):", "operation assert len(responses.calls) == 1 assert response.status_code == 200 # Validate body params", "security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back to dict and verify no", "import inspect import json import pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import", "with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): #", "this file except in compliance with the License. 
# You may obtain a", "security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a model instance of", "Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult #--------------------------------------------------------", "test_result_info_serialization(self): # Construct a json representation of a ResultInfo model result_info_model_json = {}", "= 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium'", "limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import", "'{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\":", "None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate", "true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke", "# Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json", "def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a SecurityLevelSettingRespMessagesItem 
model security_level_setting_resp_messages_item_model_json =", "security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' #", "dict and verify no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 ==", "calling from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) #", "any model objects needed in order to build this model. result_info_model = {}", "import * crn = 'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn,", "ensure the mock response will be found. def preprocess_url(self, request_url: str): if re.fullmatch('.*/+',", "# ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] =", "= 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK'", "key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy)", "# Check for correct operation assert len(responses.calls) == 1 assert response.status_code == 200", "# Invoke method response = service.set_security_level_setting() # Check for correct operation assert len(responses.calls)", "security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json 
#----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class", "from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier = 'testString' service = FirewallApiV1(", "############################################################################## # region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #--------------------------------------------------------", "security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent", "== security_level_setting_resp_result_model2 # Convert model instance back to dict and verify no loss", "param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ##############################################################################", "by calling from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict)", "security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion 
############################################################################## # End of", "ANY KIND, either express or implied. # See the License for the specific", "\"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in", "200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200)", "result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp", "(key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service:", "the model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance", "url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting() # Check for", "############################################################################## ############################################################################## # Start of Model Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test", "assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ############################################################################## # End of Model Tests ##############################################################################", 
"test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url +", "# Start of Model Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for", "#-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url =", "from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False #", "to dict and verify no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2", "[{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting()", "specific language governing permissions and # limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "\"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in all but", "Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL to", "1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): #", "= SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2", "2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct a model instance of", "== 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock", "True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance of", "TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a", "by calling from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model !=", "SecurityLevelSettingResp model security_level_setting_resp_model_json = 
{} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] =", "SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting():", "for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a SecurityLevelSettingRespResult", "{} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count']", "instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back to", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "to dict and verify no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2", "'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable']", "# Construct a json representation of a ResultInfo model result_info_model_json = {} result_info_model_json['page']", "1, \"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\":", "Invoke method response = service.set_security_level_setting( value=value, 
headers={} ) # Check for correct operation", "# Construct a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id']", "Corp. 2020. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): #", "ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import pytest import re import responses", "from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False #", "for a ValueError req_param_dict = { } for param in req_param_dict.keys(): req_copy =", "OF ANY KIND, either express or implied. 
# See the License for the", "responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting() # Check", "assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set", "[{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one", "SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model", "# Convert model instance back to dict and verify no loss of data", "Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #--------------------------------------------------------", "test_security_level_setting_resp_serialization(self): # Construct dict forms of any model objects needed in order to", "(C) Copyright IBM Corp. 2020. 
# # Licensed under the Apache License, Version", "required param and check for a ValueError req_param_dict = { } for param", "model instance back to dict and verify no loss of data security_level_setting_resp_messages_item_model_json2 =", "#-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms", "\"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\":", "responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one required param", "security_level_setting_resp_model != False # Construct a model instance of SecurityLevelSettingResp by calling from_dict", "security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #--------------------------------------------------------", "assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult():", "= 'OK' # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on", "== 1 assert response.status_code == 200 # Validate body params req_body = json.loads(str(responses.calls[0].request.body,", "representation 
security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are", "model instances are equivalent assert result_info_model == result_info_model2 # Convert model instance back", "value=value, headers={} ) # Check for correct operation assert len(responses.calls) == 1 assert", "False # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the", "Convert model instance back to dict and verify no loss of data security_level_setting_resp_messages_item_model_json2", "SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_model", "= { } for param in req_param_dict.keys(): req_copy = {key:val if key is", "for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation of a ResultInfo", "model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1", "from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False #", "= '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1,", "Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model", "verify no loss of data 
security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #-----------------------------------------------------------------------------", "security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp", "for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure the", "\"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url,", "{\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2,", "result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} #", "Invoke method response = service.get_security_level_setting() # Check for correct operation assert len(responses.calls) ==", "200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url", "[[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method response", "\"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method response =", "is None: return 
request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #--------------------------------------------------------", "Construct a json representation of a ResultInfo model result_info_model_json = {} result_info_model_json['page'] =", "= security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #-----------------------------------------------------------------------------", "re.fullmatch('.*/+', request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- #", "in all but one required param and check for a ValueError req_param_dict =", "found. def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is None: return request_url else:", "security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on']", "SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a model instance of SecurityLevelSettingResp by", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "ValueError req_param_dict = { } for param in req_param_dict.keys(): req_copy = {key:val if", "security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo 
#----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test", ") # Check for correct operation assert len(responses.calls) == 1 assert response.status_code ==", "json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a model", "\"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass in", "language governing permissions and # limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator", "= {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True", "security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo #----------------------------------------------------------------------------- class", "str): if re.fullmatch('.*/+', request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+')", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a model instance of SecurityLevelSettingResp", "# Construct a model instance of SecurityLevelSettingResp by calling from_dict on the json", "of Service: SecurityLevelSetting 
############################################################################## # region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #-----------------------------------------------------------------------------", "= 'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url", "#-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms of any model objects needed in", "authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service:", "security_level_setting_resp_messages_item_model2 # Convert model instance back to dict and verify no loss of", "instance of ResultInfo by calling from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json)", "\"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]],", "SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self):", "test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {}", "{} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = 
{} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] =", "key is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy)", "#-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation of a ResultInfo model result_info_model_json", "@responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response", "security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] =", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate", "SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 =", "request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self):", "params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # 
test_set_security_level_setting_required_params() #--------------------------------------------------------", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2", "body=mock_response, content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting() # Check for correct", "= SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent assert", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class", "for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a SecurityLevelSettingRespMessagesItem", "def test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response =", "= True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult by", "assert security_level_setting_resp_result_model != False # Construct a model instance of SecurityLevelSettingRespResult by calling", "values value = 'under_attack' # Invoke method response = 
service.set_security_level_setting( value=value, headers={} )", "== 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock", "result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 #", "to ensure the mock response will be found. def preprocess_url(self, request_url: str): if", "instance back to dict and verify no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict()", "a json representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK'", "True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation of a SecurityLevelSettingResp model", "required by applicable law or agreed to in writing, software # distributed under", "!= False # Construct a model instance of SecurityLevelSettingResp by calling from_dict on", "request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting()", "representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a model instance", "#-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')", "200 security_level_setting_resp_messages_item_model = {} # 
SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} #", "and verify no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json", "applicable law or agreed to in writing, software # distributed under the License", "= '2014-01-01T05:20:00.12345Z' # Construct a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json =", "SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a model instance of SecurityLevelSettingRespMessagesItem by", "\"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method", "representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances are", "'2014-01-01T05:20:00.12345Z' # Construct a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {}", "# Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #--------------------------------------------------------", "SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z'", "Test serialization/deserialization for ResultInfo 
#-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation of", "security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back to dict and verify no", "or agreed to in writing, software # distributed under the License is distributed", "TestGetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response will be", "Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult", "= result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct", "req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting():", "the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model", "-*- # (C) Copyright IBM Corp. 2020. 
# # Licensed under the Apache", "result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class", "\"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true,", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "# Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the json", "url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\":", "req_param_dict.keys(): req_copy = {key:val if key is not param else None for (key,val)", "1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct a", "Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation", "'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ##############################################################################", "mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\",", "of Model Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #-----------------------------------------------------------------------------", 
"re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up", "\"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}'", "model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True", "#----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request", "TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response will be", "!= False # Construct a model instance of ResultInfo by calling from_dict on", "request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting()", "return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set", "1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response,", "value = 'under_attack' # Invoke method response = service.set_security_level_setting( value=value, headers={} ) #", "Test Class for SecurityLevelSettingRespMessagesItem 
#----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem", "<filename>test/unit/test_firewall_api_v1.py # -*- coding: utf-8 -*- # (C) Copyright IBM Corp. 2020. #", "'/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(),", "service.get_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert response.status_code ==", "writing, software # distributed under the License is distributed on an \"AS IS\"", "security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors']", "and verify no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json", "no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion", "# Invoke method response = service.set_security_level_setting( value=value, headers={} ) # Check for correct", "You may obtain a copy of the License at # 
# http://www.apache.org/licenses/LICENSE-2.0 #", "headers={} ) # Check for correct operation assert len(responses.calls) == 1 assert response.status_code", "License. # You may obtain a copy of the License at # #", "} for param in req_param_dict.keys(): req_copy = {key:val if key is not param", "= json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def", "instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back to", "mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\":", "ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] =", "1 result_info_model_json['total_count'] = 200 # Construct a model instance of ResultInfo by calling", "def test_result_info_serialization(self): # Construct a json representation of a ResultInfo model result_info_model_json =", "False # Construct a model instance of ResultInfo by calling from_dict on the", "result_info_model2 # Convert model instance back to dict and verify no loss of", "correct operation assert len(responses.calls) == 1 assert response.status_code == 200 # Validate body", "a model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model_dict", "are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back to dict", "def test_security_level_setting_resp_serialization(self): # Construct dict forms of any model objects needed in order", 
"= security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo #-----------------------------------------------------------------------------", "set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock", "\"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method", "get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url +", "compliance with the License. # You may obtain a copy of the License", "else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## #", "def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json =", "{} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200", "== 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock", "[[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, 
content_type='application/json', status=200) # Pass in all", "Model Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class", "= {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] =", "service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## #", "from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify", "on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct", "Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure", "'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a", "is not param else None for (key,val) in req_param_dict.items()} with 
pytest.raises(ValueError): service.set_security_level_setting(**req_copy) #", "Construct a model instance of SecurityLevelSettingResp by calling from_dict on the json representation", "content_type='application/json', status=200) # Set up parameter values value = 'under_attack' # Invoke method", "############################################################################## # region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): #", "of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert", "[['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp by calling", "# endregion ############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of", "will be found. 
def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is None: return", "'/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\":", "for correct operation assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- #", "assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate", "# test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url", "Verify the model instances are equivalent assert result_info_model == result_info_model2 # Convert model", "a model instance of ResultInfo by calling from_dict on the json representation result_info_model_dict", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "[[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method response", "content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting() # Check for correct operation", "model instance of ResultInfo by calling from_dict on the json representation result_info_model_dict =", "serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a", "body params req_body = 
json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params()", "result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] =", "model instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back", "Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- #", "model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back", "test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\":", "for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def", "#-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json", "TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct 
a", "assert response.status_code == 200 # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert", "#----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): #", "not use this file except in compliance with the License. # You may", "\"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) #", "Convert model instance back to dict and verify no loss of data security_level_setting_resp_result_model_json2", "coding: utf-8 -*- # (C) Copyright IBM Corp. 2020. # # Licensed under", "the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a", "but one required param and check for a ValueError req_param_dict = { }", "# Preprocess the request URL to ensure the mock response will be found.", "# region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess", "service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start", "SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent assert 
security_level_setting_resp_result_model == security_level_setting_resp_result_model2 #", "#----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): #", "base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## # region", "License, Version 2.0 (the \"License\"); # you may not use this file except", "security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent", "json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a model", "a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success']", "= 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem", "Class for SecurityLevelSettingRespMessagesItem 
#----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #--------------------------------------------------------", "test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\":", "with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ##############################################################################", "json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances", "\"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting() #", "# Set up parameter values value = 'under_attack' # Invoke method response =", "# -*- coding: utf-8 -*- # (C) Copyright IBM Corp. 2020. 
# #", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up", "no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- #", "by calling from_dict on the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict)", "!= False # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on", "1 assert response.status_code == 200 # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))", "import json import pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn", "return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def", "result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test", "all but one required param and check for a 
ValueError req_param_dict = {", "json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances", "Construct a model instance of ResultInfo by calling from_dict on the json representation", "json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info']", "the mock response will be found. def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url)", "assert security_level_setting_resp_model != False # Construct a model instance of SecurityLevelSettingResp by calling", "2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status']", "\"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) #", "# you may not use this file except in compliance with the License.", "Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of", "in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- 
class", "= 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url)", "agreed to in writing, software # distributed under the License is distributed on", "of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model", "SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance", "test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\":", "security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation of a", "mock response will be found. 
def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is", "# Start of Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for", "crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting", "method response = service.get_security_level_setting() # Check for correct operation assert len(responses.calls) == 1", "body=mock_response, content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting() # Check for correct", "(the \"License\"); # you may not use this file except in compliance with", "License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import pytest import re", "param and check for a ValueError req_param_dict = { } for param in", "Convert model instance back to dict and verify no loss of data security_level_setting_resp_model_json2", "\"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up parameter values value =", "instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__", "for param in req_param_dict.keys(): req_copy = {key:val if key is not param else", "ResultInfo by calling from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model", "1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem 
security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model", "parameter values value = 'under_attack' # Invoke method response = service.set_security_level_setting( value=value, headers={}", "the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json #", "else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): #", "Invoke method response = service.set_security_level_setting() # Check for correct operation assert len(responses.calls) ==", "true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "def test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response =", "= 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct", "from_dict on the json representation security_level_setting_resp_result_model_dict = 
SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify", "of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert", "= security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ############################################################################## # End of Model", "up parameter values value = 'under_attack' # Invoke method response = service.set_security_level_setting( value=value,", "== result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- #", "security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent", "url, body=mock_response, content_type='application/json', status=200) # Set up parameter values value = 'under_attack' #", "SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self):", "self.preprocess_url(base_url + 
'/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\":", "model instance back to dict and verify no loss of data security_level_setting_resp_result_model_json2 =", "# SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level'", "back to dict and verify no loss of data result_info_model_json2 = result_info_model.to_dict() assert", "model instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model_dict =", "IBM Corp. 2020. # # Licensed under the Apache License, Version 2.0 (the", "@responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response", "= service.get_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert response.status_code", "SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 =", "on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the", "serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def 
test_security_level_setting_resp_serialization(self): # Construct dict forms of any model", "up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\":", "security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model", "Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request URL to", "file except in compliance with the License. # You may obtain a copy", "req_param_dict = { } for param in req_param_dict.keys(): req_copy = {key:val if key", "service.set_security_level_setting( value=value, headers={} ) # Check for correct operation assert len(responses.calls) == 1", "* crn = 'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier", "the specific language governing permissions and # limitations under the License. 
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator", "true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass", "= 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct", "a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable']", "# get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url", "= ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a model instance of ResultInfo", "#-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json", "in order to build this model. 
result_info_model = {} # ResultInfo result_info_model['page'] =", "License for the specific language governing permissions and # limitations under the License.", "(key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #-----------------------------------------------------------------------------", "the model instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance", "a json representation of a ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1", "security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a model instance of", "= [security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp by calling from_dict on", "model instance back to dict and verify no loss of data result_info_model_json2 =", "ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200", "endregion ############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model", "# region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- #", "preprocess_url(self, request_url: str): if 
re.fullmatch('.*/+', request_url) is None: return request_url else: return re.compile(request_url.rstrip('/')", "on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the", "to in writing, software # distributed under the License is distributed on an", "import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier = 'testString' service", "model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict =", "ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a model instance of ResultInfo by", "implied. # See the License for the specific language governing permissions and #", "a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict", "operation assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #--------------------------------------------------------", "= 'under_attack' # Invoke method response = service.set_security_level_setting( value=value, headers={} ) # Check", "\"License\"); # you may not use this file except in compliance with the", "security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model", "a model instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model", "calling from_dict on the json representation 
security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "200 # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack'", "representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are", "\"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one required", "security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance of SecurityLevelSettingRespMessagesItem", "a model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model", "security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] #", "response = service.set_security_level_setting( value=value, headers={} ) # Check for correct operation assert len(responses.calls)", "the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a", "else: return re.compile(request_url.rstrip('/') + '/+') 
#-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): #", "result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model =", "req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service: SecurityLevelSetting ##############################################################################", "= SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent assert", "= 200 # Construct a model instance of ResultInfo by calling from_dict on", "assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set", "#-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')", "class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct", "of data 
security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class", "or implied. # See the License for the specific language governing permissions and", "for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the request URL to ensure the", "the request URL to ensure the mock response will be found. def preprocess_url(self,", "Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests ############################################################################## # region #-----------------------------------------------------------------------------", "# Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation", "verify no loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #-----------------------------------------------------------------------------", "############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test Class", "Start of Model Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem", 
"[security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp by calling from_dict on the", "the model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance", "# Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "#-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json", "Convert model instance back to dict and verify no loss of data result_info_model_json2", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass in all but", "of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests ############################################################################## # region", "Construct a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] =", "test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url +", "'under_attack' # Invoke method response = service.set_security_level_setting( value=value, headers={} ) # Check for", "\"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting() #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the", "'2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the", "status=200) # Invoke method response = service.get_security_level_setting() # Check for 
correct operation assert", "of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium'", "request URL to ensure the mock response will be found. def preprocess_url(self, request_url:", "for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of", "for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting", "for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def", "result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count']", "= security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] =", "by calling from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model !=", "#-------------------------------------------------------- # test_set_security_level_setting_required_params() 
#-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url =", "security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct a model instance of", "forms of any model objects needed in order to build this model. result_info_model", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "\"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\":", "'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a", "#-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')", "security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] =", "test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\":", "= {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance of SecurityLevelSettingRespMessagesItem by", "Set up mock url = self.preprocess_url(base_url + 
'/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\",", "security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult", "# limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json", "calling from_dict on the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) #", "model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance of", "instances are equivalent assert result_info_model == result_info_model2 # Convert model instance back to", "for correct operation assert len(responses.calls) == 1 assert response.status_code == 200 # Validate", "SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 =", "correct operation assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error()", "representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model 
security_level_setting_resp_model_json['result_info'] =", "SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests ############################################################################## # region #----------------------------------------------------------------------------- #", "#-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url =", "security_level_setting_resp_messages_item_model != False # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict", "ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent assert result_info_model", "instance back to dict and verify no loss of data result_info_model_json2 = result_info_model.to_dict()", "= ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent assert", "return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def", "instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json)", "ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation of a ResultInfo model", "class TestSecurityLevelSettingResp(): 
#-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct", "TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "False # Construct a model instance of SecurityLevelSettingResp by calling from_dict on the", "you may not use this file except in compliance with the License. #", "Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation", "objects needed in order to build this model. result_info_model = {} # ResultInfo", "order to build this model. 
result_info_model = {} # ResultInfo result_info_model['page'] = 1", "by calling from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model !=", "body=mock_response, content_type='application/json', status=200) # Set up parameter values value = 'under_attack' # Invoke", "to dict and verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2", "# Invoke method response = service.get_security_level_setting() # Check for correct operation assert len(responses.calls)", "import re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier =", "on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct", "\"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url,", "def test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response =", "#----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request", "2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH,", "+ '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock", "assert 
response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set", "governing permissions and # limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import", "if re.fullmatch('.*/+', request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #--------------------------------------------------------", "SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct a model instance of SecurityLevelSettingRespResult by", "@responses.activate def test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response", "response will be found. def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is None:", "of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class", "to build this model. 
result_info_model = {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page']", "method response = service.set_security_level_setting( value=value, headers={} ) # Check for correct operation assert", "+ '/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock", "in req_param_dict.keys(): req_copy = {key:val if key is not param else None for", "200 # Construct a model instance of ResultInfo by calling from_dict on the", "dict forms of any model objects needed in order to build this model.", "response = service.set_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert", "use this file except in compliance with the License. # You may obtain", "be found. def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is None: return request_url", "Copyright IBM Corp. 2020. 
# # Licensed under the Apache License, Version 2.0", "FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of", "model instance of ResultInfo by calling from_dict on the json representation result_info_model =", "= FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start", "{key:val if key is not param else None for (key,val) in req_param_dict.items()} with", "status=200) # Invoke method response = service.set_security_level_setting() # Check for correct operation assert", "json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct a model", "region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test", "responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one required param", "security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult", "instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model_dict = 
SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value']", "re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier = 'testString'", "#-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url =", "security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test", "data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for", "on the json representation security_level_setting_resp_result_model_dict = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the", "SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class 
TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self):", "Construct a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] =", "calling from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False", "on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the", "result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent", "of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2", "calling from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False", "[{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up parameter values value", "for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms of any model objects", "\"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, 
\"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1,", "on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct", "2.0 (the \"License\"); # you may not use this file except in compliance", "None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate", "on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct", "def test_set_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response =", "result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a model instance of", "equivalent assert result_info_model == result_info_model2 # Convert model instance back to dict and", "SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a SecurityLevelSettingRespMessagesItem model", "loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test", "# Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation", "back to dict and verify no loss of data 
security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert", "verify no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #-----------------------------------------------------------------------------", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "by calling from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict)", "= True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance", "responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting() # Check", "1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): #", "# Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL", "content_type='application/json', status=200) # Pass in all but one required param and check for", "ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent assert result_info_model == result_info_model2 #", "a ValueError req_param_dict = { } for param in req_param_dict.keys(): req_copy = {key:val", "security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance of 
SecurityLevelSettingRespMessagesItem by calling from_dict", "# # Unless required by applicable law or agreed to in writing, software", "instance back to dict and verify no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict()", "\"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json',", "import pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString'", "by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict)", "request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self):", "#----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization", "calling from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False", "express or implied. # See the License for the specific language governing permissions", "representation of a ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] =", "this model. 
result_info_model = {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2", "assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back to dict and verify", "json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value']", "model objects needed in order to build this model. result_info_model = {} #", "and verify no loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json", "no loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- #", "correct operation assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error()", "len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def", "= '2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on", "either express or implied. 
# See the License for the specific language governing", "= {key:val if key is not param else None for (key,val) in req_param_dict.items()}", "request_url: str): if re.fullmatch('.*/+', request_url) is None: return request_url else: return re.compile(request_url.rstrip('/') +", "assert len(responses.calls) == 1 assert response.status_code == 200 # Validate body params req_body", "url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting() # Check for", "instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "assert result_info_model == result_info_model2 # Convert model instance back to dict and verify", "security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp by calling from_dict", "== 1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self):", "response.status_code == 200 # Validate body params req_body = json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value']", "'OK' # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the", "the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct 
a", "the model instances are equivalent assert result_info_model == result_info_model2 # Convert model instance", "# Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for", "!= False # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on", "from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify", "= 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation", "calling from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) #", "= 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model =", "data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for", "200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200)", "[[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up parameter", 
"#-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url =", "of ResultInfo by calling from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2", "1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response,", "the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model != False # Construct a", "# Verify the model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert", "responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier = 'testString' service =", "\"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up parameter values", "response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self): # Set up", "in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service: SecurityLevelSetting", "############################################################################## # Start of Model Tests ############################################################################## # region 
#----------------------------------------------------------------------------- # Test Class", "representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a model instance", "the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model", "of any model objects needed in order to build this model. result_info_model =", "instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json)", "'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation of", "the License. # You may obtain a copy of the License at #", "SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): # Construct dict forms of any model objects needed", "no loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- #", "result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model", "for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def", "assert security_level_setting_resp_result_model_json2 == 
security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo():", "are equivalent assert result_info_model == result_info_model2 # Convert model instance back to dict", "response = service.get_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert", "url, body=mock_response, content_type='application/json', status=200) # Pass in all but one required param and", "= 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "# Construct dict forms of any model objects needed in order to build", "from_dict on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False #", "a json representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level'", "= {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] =", "json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert result_info_model != False # Construct a model", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for 
SecurityLevelSettingResp", "SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json) assert security_level_setting_resp_model", "dict and verify no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 ==", "SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 #", "= 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct", "a model instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model_dict", "data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class for", "json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self):", "# Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for", "len(responses.calls) == 
1 assert response.status_code == 200 # Validate body params req_body =", "# test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url", "a model instance of ResultInfo by calling from_dict on the json representation result_info_model", "a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model", "pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for set_security_level_setting #----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess", "needed in order to build this model. result_info_model = {} # ResultInfo result_info_model['page']", "not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion", "{} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling", "= {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id']", "#----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization", "loss of data 
security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ##############################################################################", "req_body['value'] == 'under_attack' #-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up", "security_level_setting_resp_model2 # Convert model instance back to dict and verify no loss of", "responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up parameter values value = 'under_attack'", "End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests ############################################################################## #", "Tests ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingRespMessagesItem #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespMessagesItem():", "zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ##############################################################################", "Check for correct operation assert len(responses.calls) == 1 assert response.status_code == 200 #--------------------------------------------------------", "Construct a json representation of a 
SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] =", "= SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a model instance of SecurityLevelSettingRespMessagesItem", "#-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json", "from_dict on the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify", "result_info_model_json['total_count'] = 200 # Construct a model instance of ResultInfo by calling from_dict", "[[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Pass in all", "# Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for", "#-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')", "representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} 
security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct", "a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a model", "loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test", "with the License. # You may obtain a copy of the License at", "SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_result_model", "# Verify the model instances are equivalent assert result_info_model == result_info_model2 # Convert", "Verify the model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model", "response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up", "= SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2", "SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a SecurityLevelSettingRespResult model", "pytest.raises(ValueError): 
service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ############################################################################## #", "representation of a SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] =", "{} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']]", "equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back to dict and", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "# test_get_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_value_error(self): # Set up mock url = self.preprocess_url(base_url", "security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo", "assert security_level_setting_resp_messages_item_model != False # Construct a model instance of SecurityLevelSettingRespMessagesItem by calling", "\"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\":", "'under_attack' 
#-------------------------------------------------------- # test_set_security_level_setting_required_params() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url", "model instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model =", "body=mock_response, content_type='application/json', status=200) # Pass in all but one required param and check", "Preprocess the request URL to ensure the mock response will be found. def", "representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are", "json representation of a ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page']", "# Pass in all but one required param and check for a ValueError", "None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.set_security_level_setting(**req_copy) # endregion ############################################################################## # End", "\"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json',", "Construct dict forms of any model objects needed in order to build this", "= service.set_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert response.status_code", "status=200) # Pass in all but one required param and check for a", "model. 
result_info_model = {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count']", "@responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response", "def preprocess_url(self, request_url: str): if re.fullmatch('.*/+', request_url) is None: return request_url else: return", "law or agreed to in writing, software # distributed under the License is", "dict and verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 ==", "the License for the specific language governing permissions and # limitations under the", "Pass in all but one required param and check for a ValueError req_param_dict", "result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct a model instance of ResultInfo", "model instance of SecurityLevelSettingResp by calling from_dict on the json representation security_level_setting_resp_model =", "result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #--------------------------------------------------------", "are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back to dict", "Set up parameter values value = 'under_attack' # Invoke method response = service.set_security_level_setting(", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] = 1", "== 200 # Validate body params req_body = 
json.loads(str(responses.calls[0].request.body, 'utf-8')) assert req_body['value'] ==", "'/+') #-------------------------------------------------------- # get_security_level_setting() #-------------------------------------------------------- @responses.activate def test_get_security_level_setting_all_params(self): # Set up mock url", "zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url = 'https://api.cis.cloud.ibm.com'", "check for a ValueError req_param_dict = { } for param in req_param_dict.keys(): req_copy", "security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json", "[{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Invoke method response = service.set_security_level_setting()", "security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' #", "result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a", "ResultInfo by calling from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 =", "= service.set_security_level_setting( value=value, headers={} ) # Check for correct operation assert len(responses.calls) ==", "json import pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn =", "{\"page\": 1, 
\"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\":", "param in req_param_dict.keys(): req_copy = {key:val if key is not param else None", "pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import * crn = 'testString' zone_identifier", "URL to ensure the mock response will be found. def preprocess_url(self, request_url: str):", "SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model", "assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp():", "equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back to dict and", "def test_get_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response =", "'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance of", "to dict and verify no loss of data result_info_model_json2 = result_info_model.to_dict() assert result_info_model_json2", "security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json", "Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the 
request URL to ensure", "# set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url", "instance back to dict and verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict()", "2, \"count\": 1, \"total_count\": 200}, \"success\": true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET,", "model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model_dict =", "service.set_security_level_setting() # Check for correct operation assert len(responses.calls) == 1 assert response.status_code ==", "inspect import json import pytest import re import responses from ibm_cloud_networking_services.firewall_api_v1 import *", "in compliance with the License. # You may obtain a copy of the", "result_info_model == result_info_model2 # Convert model instance back to dict and verify no", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "json representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' #", "= 200 security_level_setting_resp_messages_item_model = {} # SecurityLevelSettingRespMessagesItem security_level_setting_resp_messages_item_model['status'] = 'OK' security_level_setting_resp_result_model = {}", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "and check for a ValueError req_param_dict = { } for param in req_param_dict.keys():", "for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def", "Verify the model instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model", "SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model", "= SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2", "serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation of a", "{ } for param in req_param_dict.keys(): req_copy = {key:val if key is not", "See the License for the specific language governing 
permissions and # limitations under", "security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model ==", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json).__dict__ security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent assert", "#----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization", "class TestSecurityLevelSettingRespMessagesItem(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct", "of a ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2", "# Construct a json representation of a SecurityLevelSettingResp model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result']", "assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate", "== security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- 
# Test Class for SecurityLevelSettingRespResult #----------------------------------------------------------------------------- class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- #", "a ResultInfo model result_info_model_json = {} result_info_model_json['page'] = 1 result_info_model_json['per_page'] = 2 result_info_model_json['count']", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 #", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class TestGetSecurityLevelSetting(): # Preprocess the", "#----------------------------------------------------------------------------- class TestSetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response", "# Verify the model instances are equivalent assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert", "serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json representation of a", "instance of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json)", "security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back to 
dict and verify no", "#----------------------------------------------------------------------------- # Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization", "# Construct a json representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status']", "one required param and check for a ValueError req_param_dict = { } for", "== 1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_value_error(self):", "representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert security_level_setting_resp_messages_item_model != False # Construct a model instance", "result_info_model = {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] = 2 result_info_model['count'] =", "security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_model ==", "= [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model] # Construct a model instance of SecurityLevelSettingResp by", "Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting #----------------------------------------------------------------------------- class", 
"\"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set up", "assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back to dict and verify", "True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult by calling", "= SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct a model instance of SecurityLevelSettingRespResult", "result_info_model != False # Construct a model instance of ResultInfo by calling from_dict", "security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages']", "{} # SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on']", "data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ############################################################################## # End", "status=200) # Set up parameter values value = 'under_attack' # Invoke method response", "class TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult 
#-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct", "security_level_setting_resp_result_model2 # Convert model instance back to dict and verify no loss of", "content_type='application/json', status=200) # Invoke method response = service.get_security_level_setting() # Check for correct operation", "Version 2.0 (the \"License\"); # you may not use this file except in", "for the specific language governing permissions and # limitations under the License. from", "'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- #", "except in compliance with the License. # You may obtain a copy of", "\"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke method response =", "of SecurityLevelSettingRespMessagesItem by calling from_dict on the json representation security_level_setting_resp_messages_item_model = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json) assert", "ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self):", "= 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct a model instance", "class TestGetSecurityLevelSetting(): # Preprocess the request URL to ensure the mock response will", "NoAuthAuthenticator import inspect import json import pytest 
import re import responses from ibm_cloud_networking_services.firewall_api_v1", "is None: return request_url else: return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #--------------------------------------------------------", "test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {}", "back to dict and verify no loss of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert", "result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances are equivalent assert result_info_model ==", "{} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z'", "of a SecurityLevelSettingRespMessagesItem model security_level_setting_resp_messages_item_model_json = {} security_level_setting_resp_messages_item_model_json['status'] = 'OK' # Construct a", "-*- coding: utf-8 -*- # (C) Copyright IBM Corp. 2020. # # Licensed", "from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import pytest import re import", "Class for SecurityLevelSettingResp #----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #--------------------------------------------------------", "2020. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ############################################################################## #", "are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back to dict", "security_level_setting_resp_result_model2 = SecurityLevelSettingRespResult(**security_level_setting_resp_result_model_dict) # Verify the model instances are equivalent assert security_level_setting_resp_result_model ==", "permissions and # limitations under the License. 
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect", "# End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests ##############################################################################", "model instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back", "== security_level_setting_resp_model2 # Convert model instance back to dict and verify no loss", "# Construct a model instance of ResultInfo by calling from_dict on the json", "[{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one", "representation security_level_setting_resp_result_model = SecurityLevelSettingRespResult.from_dict(security_level_setting_resp_result_model_json) assert security_level_setting_resp_result_model != False # Construct a model instance", "Start of Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test Class for get_security_level_setting", "back to dict and verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert", "result_info_model_json['per_page'] = 2 result_info_model_json['count'] = 1 result_info_model_json['total_count'] = 200 # Construct a model", "assert result_info_model != False # Construct a model instance of ResultInfo by calling", "true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Invoke", "method response = service.set_security_level_setting() # Check for correct 
operation assert len(responses.calls) == 1", "#-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): # Construct a json", "= 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## # region #-----------------------------------------------------------------------------", "security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True security_level_setting_resp_model_json['errors'] = [['testString']] security_level_setting_resp_model_json['messages'] = [security_level_setting_resp_messages_item_model]", "# Test serialization/deserialization for SecurityLevelSettingRespResult #-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a json representation", "req_copy = {key:val if key is not param else None for (key,val) in", "Test serialization/deserialization for SecurityLevelSettingRespMessagesItem #-------------------------------------------------------- def test_security_level_setting_resp_messages_item_serialization(self): # Construct a json representation of", "############################################################################## # End of Service: SecurityLevelSetting ############################################################################## ############################################################################## # Start of Model Tests", "model instance of SecurityLevelSettingRespResult by calling from_dict on the json representation security_level_setting_resp_result_model =", 
"SecurityLevelSettingRespResult model security_level_setting_resp_result_model_json = {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] =", "test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\":", "of ResultInfo by calling from_dict on the json representation result_info_model = ResultInfo.from_dict(result_info_model_json) assert", "instance of ResultInfo by calling from_dict on the json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__", "# Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\":", "loss of data security_level_setting_resp_result_model_json2 = security_level_setting_resp_result_model.to_dict() assert security_level_setting_resp_result_model_json2 == security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test", "= True security_level_setting_resp_result_model['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a json representation of a SecurityLevelSettingResp", "= {} security_level_setting_resp_result_model_json['id'] = 'security_level' security_level_setting_resp_result_model_json['value'] = 'medium' security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] =", "import NoAuthAuthenticator import inspect import json import pytest import re import responses from", "\"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\": 200},", "== security_level_setting_resp_messages_item_model2 # Convert model 
instance back to dict and verify no loss", "# SecurityLevelSettingRespResult security_level_setting_resp_result_model['id'] = 'security_level' security_level_setting_resp_result_model['value'] = 'medium' security_level_setting_resp_result_model['editable'] = True security_level_setting_resp_result_model['modified_on'] =", "under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import pytest", "service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## # region #----------------------------------------------------------------------------- # Test", "the json representation security_level_setting_resp_messages_item_model_dict = SecurityLevelSettingRespMessagesItem.from_dict(security_level_setting_resp_messages_item_model_json).__dict__ security_level_setting_resp_messages_item_model2 = SecurityLevelSettingRespMessagesItem(**security_level_setting_resp_messages_item_model_dict) # Verify the model", "crn = 'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier )", "+ '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"},", "None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test Class for", "param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- # Test", 
"@responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response", ") base_url = 'https://api.cis.cloud.ibm.com' service.set_service_url(base_url) ############################################################################## # Start of Service: SecurityLevelSetting ############################################################################## #", "true, \"errors\": [[\"errors\"]], \"messages\": [{\"status\": \"OK\"}]}' responses.add(responses.PATCH, url, body=mock_response, content_type='application/json', status=200) # Set", "operation assert len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_get_security_level_setting_value_error() #--------------------------------------------------------", "security_level_setting_resp_result_model != False # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict", "set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set up mock url = self.preprocess_url(base_url +", "security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict", "Check for correct operation assert len(responses.calls) == 1 assert response.status_code == 200 #", "len(responses.calls) == 1 assert response.status_code == 200 #-------------------------------------------------------- # test_set_security_level_setting_value_error() #-------------------------------------------------------- @responses.activate def", "TestSecurityLevelSettingRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingRespResult 
#-------------------------------------------------------- def test_security_level_setting_resp_result_serialization(self): # Construct a", "\"OK\"}]}' responses.add(responses.GET, url, body=mock_response, content_type='application/json', status=200) # Pass in all but one required", "calling from_dict on the json representation security_level_setting_resp_model_dict = SecurityLevelSettingResp.from_dict(security_level_setting_resp_model_json).__dict__ security_level_setting_resp_model2 = SecurityLevelSettingResp(**security_level_setting_resp_model_dict) #", "not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #----------------------------------------------------------------------------- #", "# Verify the model instances are equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert", "true, \"modified_on\": \"2014-01-01T05:20:00.12345Z\"}, \"result_info\": {\"page\": 1, \"per_page\": 2, \"count\": 1, \"total_count\": 200}, \"success\":", "#-------------------------------------------------------- @responses.activate def test_set_security_level_setting_required_params(self): # Set up mock url = self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level')", "security_level_setting_resp_result_model_json['editable'] = True security_level_setting_resp_result_model_json['modified_on'] = '2014-01-01T05:20:00.12345Z' # Construct a model instance of SecurityLevelSettingRespResult", "== security_level_setting_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- #", "== result_info_model2 # Convert model instance back to dict and verify no loss", 
"#----------------------------------------------------------------------------- class TestSecurityLevelSettingResp(): #-------------------------------------------------------- # Test serialization/deserialization for SecurityLevelSettingResp #-------------------------------------------------------- def test_security_level_setting_resp_serialization(self): #", "1 result_info_model['per_page'] = 2 result_info_model['count'] = 1 result_info_model['total_count'] = 200 security_level_setting_resp_messages_item_model = {}", "and verify no loss of data security_level_setting_resp_model_json2 = security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json", "build this model. result_info_model = {} # ResultInfo result_info_model['page'] = 1 result_info_model['per_page'] =", "#----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo #-------------------------------------------------------- def test_result_info_serialization(self): #", "= self.preprocess_url(base_url + '/v1/testString/zones/testString/settings/security_level') mock_response = '{\"result\": {\"id\": \"security_level\", \"value\": \"medium\", \"editable\": true,", "instances are equivalent assert security_level_setting_resp_model == security_level_setting_resp_model2 # Convert model instance back to", "# (C) Copyright IBM Corp. 2020. # # Licensed under the Apache License,", "the License. 
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import json import pytest import", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Test Class for ResultInfo #----------------------------------------------------------------------------- class TestResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ResultInfo", "security_level_setting_resp_model.to_dict() assert security_level_setting_resp_model_json2 == security_level_setting_resp_model_json # endregion ############################################################################## # End of Model Tests", "and # limitations under the License. from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator import inspect import", "equivalent assert security_level_setting_resp_result_model == security_level_setting_resp_result_model2 # Convert model instance back to dict and", "assert security_level_setting_resp_messages_item_model == security_level_setting_resp_messages_item_model2 # Convert model instance back to dict and verify", "is not param else None for (key,val) in req_param_dict.items()} with pytest.raises(ValueError): service.get_security_level_setting(**req_copy) #-----------------------------------------------------------------------------", "False # Construct a model instance of SecurityLevelSettingRespResult by calling from_dict on the", "model security_level_setting_resp_model_json = {} security_level_setting_resp_model_json['result'] = security_level_setting_resp_result_model security_level_setting_resp_model_json['result_info'] = result_info_model security_level_setting_resp_model_json['success'] = True", "of data security_level_setting_resp_messages_item_model_json2 = security_level_setting_resp_messages_item_model.to_dict() assert security_level_setting_resp_messages_item_model_json2 == 
security_level_setting_resp_messages_item_model_json #----------------------------------------------------------------------------- # Test Class", "json representation result_info_model_dict = ResultInfo.from_dict(result_info_model_json).__dict__ result_info_model2 = ResultInfo(**result_info_model_dict) # Verify the model instances", "= result_info_model.to_dict() assert result_info_model_json2 == result_info_model_json #----------------------------------------------------------------------------- # Test Class for SecurityLevelSettingResp #-----------------------------------------------------------------------------", "'testString' zone_identifier = 'testString' service = FirewallApiV1( authenticator=NoAuthAuthenticator(), crn=crn, zone_identifier=zone_identifier ) base_url =", "return re.compile(request_url.rstrip('/') + '/+') #-------------------------------------------------------- # set_security_level_setting() #-------------------------------------------------------- @responses.activate def test_set_security_level_setting_all_params(self): # Set" ]
[ "self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern() action", "BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name =", "BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern)", "from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern", "action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertIsNone(evdef.action)", "evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern() action =", "evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern() action = None evdef", "BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\"", "= BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name", "TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action = NoAction() evdef", "test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name,", "self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): 
name = \"evdef_name\" pattern = BoboPattern() action = None", "NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def", "\\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action", "BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern)", "class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action = NoAction()", "import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern()", "def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern() action = None evdef =", "NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def", "action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action,", "= BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern,", "= \"evdef_name\" pattern = BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action)", "from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self):", "evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def 
test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern()", "bobocep.rules.actions.no_action import NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class", "\"evdef_name\" pattern = BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name,", "pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\"", "name = \"evdef_name\" pattern = BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern,", "action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern", "= NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action)", "import NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase):", "name = \"evdef_name\" pattern = BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern,", "from bobocep.rules.actions.no_action import NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent", "self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self): name = \"evdef_name\" pattern =", "unittest from bobocep.rules.actions.no_action import NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from 
bobocep.setup.bobo_complex_event import \\", "bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name", "pattern = BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name)", "= BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern,", "test_constructor_actions_is_none(self): name = \"evdef_name\" pattern = BoboPattern() action = None evdef = BoboComplexEvent(name=name,", "def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action = NoAction() evdef =", "import BoboPattern from bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name =", "evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name) self.assertEqual(pattern, evdef.pattern) self.assertEqual(action, evdef.action) def test_constructor_actions_is_none(self):", "= \"evdef_name\" pattern = BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action)", "import unittest from bobocep.rules.actions.no_action import NoAction from bobocep.rules.nfas.patterns.bobo_pattern import BoboPattern from bobocep.setup.bobo_complex_event import", "\"evdef_name\" pattern = BoboPattern() action = NoAction() evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name,", "bobocep.setup.bobo_complex_event import \\ BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern =", "pattern = BoboPattern() action = None evdef = BoboComplexEvent(name=name, pattern=pattern, action=action) self.assertEqual(name, evdef.name)", 
"BoboComplexEvent class TestBoboComplexEvent(unittest.TestCase): def test_constructor(self): name = \"evdef_name\" pattern = BoboPattern() action =" ]
[ "OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value = models.EmailField(_(\"address\")) class Meta: verbose_name = _(\"email", "None: existing.is_primary = False existing.save(update_primary=False) elif existing is None: self.is_primary = True super(PrimaryProperty,", "django.utils.translation import ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist", "True self.get_collection(instance).add(value) def __delete__(self, instance): self.get_collection(instance).primary().delete() for obj in self.get_collection(instance).all(): obj.is_primary = True", "*args, **kwargs): if self.value == 'http://': return super(Link, self).save(*args, **kwargs) class Organization(PrimaryProperty): contact", "verbose_name = _(\"group\") verbose_name_plural = _(\"groups\") @property def member_list(self): return ', '.join([str(c) for", "city = models.CharField(_(\"city\"), max_length=127, blank=True) state = models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"),", "try: existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing = None if", "def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS = ( ('landline', _('landline')), ('mobile',", "= _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value = models.EmailField(_(\"address\"))", "PrimaryPropery class ContactProperty(models.Model): class Meta: abstract = True def save(self, *args, **kwargs): self.contact.save()", "self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing = None if self.is_primary: if 
existing", "country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True) class Meta:", "self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None # Base classes # Every contact property must", "django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db import models PROPERTY_LABELS =", "def __unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model): name = models.CharField(_(\"name\"),", "owner): if instance is None: return self return self.get_collection(instance).primary() def __set__(self, instance, value):", "class Meta: abstract = True def __unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self))", "_('landline')), ('mobile', _('mobile')), ('fax', _('fax')), ) contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label = models.CharField(_(\"label\"),", "contact property must inherit from either ContactProperty or # PrimaryPropery class ContactProperty(models.Model): class", "('fax', _('fax')), ) contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label = models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value", "try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None # Base classes # Every contact", "from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db import models PROPERTY_LABELS", "% self.name or \"\", self.value) # Contact properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name):", ") IM_SERVICES = ( ('google', _('Google Talk')), ('aim', _('AIM')), ('yahoo', _('Yahoo')), ('msn', _('MSN')),", "**kwargs): if self.value == 'http://': return super(Link, self).save(*args, **kwargs) class 
Organization(PrimaryProperty): contact =", "_('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES = ( ('google', _('Google Talk')), ('aim',", "models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact", "= models.ForeignKey('Contact', related_name=\"links\") value = models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name = _(\"link\")", "instance): self.get_collection(instance).primary().delete() for obj in self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty, NamedProperty):", "= _(\"organizations\") def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS = ( ('landline',", "and '%s: ' % self.name or \"\", self.value) # Contact properties class PrimaryPropertyDescriptor(object):", "def save(self, *args, **kwargs): update_primary = kwargs.pop('update_primary', True) if update_primary: try: existing =", "class IMAccount(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account =", "Base classes # Every contact property must inherit from either ContactProperty or #", "fields and methods, models can implement this for # themselves if different. 
class", "class Meta: abstract = True def save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs)", "blank=True) class Meta: abstract = True def __unicode__(self): return u'%s%s' % (self.name and", "instance is None: return self return self.get_collection(instance).primary() def __set__(self, instance, value): value.is_primary =", "models.DateTimeField(auto_now=True, editable=False) class Meta: verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',)", "return None # Base classes # Every contact property must inherit from either", "class Meta: abstract = True def save(self, *args, **kwargs): update_primary = kwargs.pop('update_primary', True)", "name = models.CharField(_(\"name\"), max_length=200) class Meta: abstract = True def __unicode__(self): return u'%s:", "\" % self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact',", "models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31,", "models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS) class Meta: abstract = True def __unicode__(self): return u'%s [%s]'", "_(\"links\") def save(self, *args, **kwargs): if self.value == 'http://': return super(Link, self).save(*args, **kwargs)", "= models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name = _(\"organization\")", "contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value = models.EmailField(_(\"address\")) class Meta: verbose_name = _(\"email address\")", "_(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',) def __unicode__(self): return self.name # primary", "address\"), max_length=200) class Meta: verbose_name = 
_(\"IM account\") verbose_name_plural = _(\"IM accounts\") @property", "blank=True) state = models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal", "self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address line", "properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance): return", "_(\"group\") verbose_name_plural = _(\"groups\") @property def member_list(self): return ', '.join([str(c) for c in", "max_length=200, choices=PROPERTY_LABELS) class Meta: abstract = True def __unicode__(self): return u'%s [%s]' %", "models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value = models.CharField(_('number'), max_length=100) class Meta: verbose_name = _(\"phone number\")", "inherit from either ContactProperty or # PrimaryPropery class ContactProperty(models.Model): class Meta: abstract =", "_('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None", "verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS = (", "True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def", "contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address line 1\"), max_length=127, blank=False) address2 =", "or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, 
LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 =", "= models.CharField(_(\"address line 1\"), max_length=127, blank=False) address2 = models.CharField(_(\"address line 2\"), max_length=127, blank=True)", "Meta: verbose_name = _(\"date\") verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact =", "= PrimaryPropertyManager() class Meta: abstract = True def save(self, *args, **kwargs): update_primary =", "= PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def", "@property def value(self): data = [self.address1, self.address2, self.city, self.state, self.country, self.postcode] return \",", "if instance is None: return self return self.get_collection(instance).primary() def __set__(self, instance, value): value.is_primary", "= True def __unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model): name", "Mixin classes # Abstacts out common fields and methods, models can implement this", "contact = models.ForeignKey('Contact', related_name=\"dates\") value = models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\") verbose_name_plural", "def primary(self): try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None # Base classes #", "class PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False) objects = PrimaryPropertyManager() class Meta: abstract =", "def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance): return getattr(instance, self.collection_name) def", "value.is_primary = True self.get_collection(instance).add(value) def __delete__(self, instance): 
self.get_collection(instance).primary().delete() for obj in self.get_collection(instance).all(): obj.is_primary", "('name',) def __unicode__(self): return self.name # primary contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account", "True def __unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model): name =", "from django.utils.translation import ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions import", "if self.value == 'http://': return super(Link, self).save(*args, **kwargs) class Organization(PrimaryProperty): contact = models.ForeignKey('Contact',", "models.CharField(_(\"name\"), max_length=200, blank=True) class Meta: abstract = True def __unicode__(self): return u'%s%s' %", "__unicode__(self): return u'%s: %s' % (self.name, self.value) class OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200,", "service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email address\"),", "u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value", "# PrimaryPropery class ContactProperty(models.Model): class Meta: abstract = True def save(self, *args, **kwargs):", "None if self.is_primary: if existing is not None: existing.is_primary = False existing.save(update_primary=False) elif", "choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200) class Meta: verbose_name", "NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value = models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\")", "notes = models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True, 
editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class", "models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\") verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty):", "= models.BooleanField(_(\"primary\"), default=False) objects = PrimaryPropertyManager() class Meta: abstract = True def save(self,", "True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes # Abstacts out common fields and", "return u'%s%s' % (self.name and '%s: ' % self.name or \"\", self.value) #", "LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address line 1\"), max_length=127, blank=False) address2", "Group(models.Model): name = models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"),", "= models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\")", "self).save(*args, **kwargs) class Organization(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title", "can implement this for # themselves if different. 
class LabeledProperty(models.Model): label = models.CharField(_(\"label\"),", "editable=False) class Meta: verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',) def", "PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def address(self):", "OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200, blank=True) class Meta: abstract = True def __unicode__(self):", "_(\"phone numbers\") def __unicode__(self): return u'%s%s [%s]' % (self.name and \"%s: \" %", "class Meta: verbose_name = _(\"link\") verbose_name_plural = _(\"links\") def save(self, *args, **kwargs): if", "code\"), max_length=31, blank=True) class Meta: verbose_name = _(\"postal address\") verbose_name_plural = _(\"postal addresses\")", "class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address line 1\"), max_length=127,", "Talk')), ('aim', _('AIM')), ('yahoo', _('Yahoo')), ('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class", "PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def address(self): return self.postal_address class Group(models.Model): name =", "models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"),", "\"\"\" name = models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True)", "editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class Meta: 
verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\")", "models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True) class Meta: verbose_name = _(\"postal address\") verbose_name_plural =", "if i]) class Contact(models.Model): \"\"\" A person or company. \"\"\" name = models.CharField(max_length=200)", "choices=PROPERTY_LABELS) class Meta: abstract = True def __unicode__(self): return u'%s [%s]' % (self.value,", "models.CharField(_(\"address line 2\"), max_length=127, blank=True) city = models.CharField(_(\"city\"), max_length=127, blank=True) state = models.CharField(_(\"state/province/region\"),", "% (self.name and \"%s: \" % self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty,", "if self.is_primary: if existing is not None: existing.is_primary = False existing.save(update_primary=False) elif existing", "save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False)", "_(\"IM accounts\") @property def value(self): return self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact',", "*args, **kwargs) class PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False) objects = PrimaryPropertyManager() class Meta:", "contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label = models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value = models.CharField(_('number'), max_length=100)", "__unicode__(self): return u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact',", "abstract = True def __unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model):", "models.ForeignKey('Contact', related_name=\"dates\") value = 
models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\") verbose_name_plural = _(\"dates\")", "% self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\")", "*args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False) objects", "self.postal_address class Group(models.Model): name = models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True) members =", "value(self): data = [self.address1, self.address2, self.city, self.state, self.country, self.postcode] return \", \".join([i for", "return self.get_collection(instance).primary() def __set__(self, instance, value): value.is_primary = True self.get_collection(instance).add(value) def __delete__(self, instance):", "for obj in self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty, NamedProperty): contact =", "= models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value = models.CharField(_('number'), max_length=100) class Meta: verbose_name = _(\"phone", "class Meta: verbose_name = _(\"postal address\") verbose_name_plural = _(\"postal addresses\") @property def value(self):", "and methods, models can implement this for # themselves if different. 
class LabeledProperty(models.Model):
    """Abstract mixin adding a home/work/other label to a property model."""
    label = models.CharField(_("label"), max_length=200, choices=PROPERTY_LABELS)

    class Meta:
        abstract = True

    def __unicode__(self):
        # `self.value` is expected to be supplied by the concrete model
        # mixing this class in.
        # NOTE(review): get_label_display is invoked unbound through the
        # mixin class rather than as self.get_label_display() -- looks
        # deliberate (a subclass may shadow `label`); confirm before changing.
        return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self))
class PrimaryProperty(ContactProperty):
    """Abstract base for property models where one row per contact is primary."""
    # At most one row per (contact, concrete model) should carry this flag;
    # save() below maintains that invariant.
    is_primary = models.BooleanField(_("primary"), default=False)
    objects = PrimaryPropertyManager()

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """Save while keeping exactly one primary row per contact.

        The keyword ``update_primary=False`` (consumed here, never
        forwarded to the parent save) skips the bookkeeping -- used below
        when demoting the previous primary, to avoid recursion.
        """
        update_primary = kwargs.pop('update_primary', True)
        if update_primary:
            # Find this contact's current primary, excluding this row itself.
            try:
                existing = self.__class__.objects.exclude(pk=self.id) \
                    .filter(contact=self.contact, is_primary=True).get()
            except ObjectDoesNotExist:
                existing = None
            if self.is_primary:
                # This row is becoming primary: demote the old one first.
                if existing is not None:
                    existing.is_primary = False
                    existing.save(update_primary=False)
            elif existing is None:
                # No primary exists yet, so this row becomes it by default.
                self.is_primary = True
        super(PrimaryProperty, self).save(*args, **kwargs)
class CustomField(ContactProperty, NamedProperty):
    """An arbitrary named text value attached to a contact."""
    contact = models.ForeignKey('Contact', related_name="custom_fields")
    value = models.TextField(_("value"))

    class Meta:
        # Consistency fix: every sibling property model declares its
        # verbose names for the admin; CustomField previously did not.
        verbose_name = _("custom field")
        verbose_name_plural = _("custom fields")

    def __unicode__(self):
        return u'%s: %s' % (self.name, self.value)
class PhoneNumber(PrimaryProperty, OptionalNamedProperty):
    """A phone number belonging to a contact."""

    PHONE_NUM_LABELS = (
        ('landline', _('landline')),
        ('mobile', _('mobile')),
        ('fax', _('fax')),
    )

    contact = models.ForeignKey('Contact', related_name="phone_numbers")
    label = models.CharField(_("label"), max_length=200, choices=PHONE_NUM_LABELS)
    value = models.CharField(_('number'), max_length=100)

    class Meta:
        verbose_name = _("phone number")
        verbose_name_plural = _("phone numbers")

    def __unicode__(self):
        # Optional "name: " prefix, then the number and its label.
        prefix = u"%s: " % self.name if self.name else u""
        return u'%s%s [%s]' % (prefix, self.value,
                               PhoneNumber.get_label_display(self))
class PostalAddress(PrimaryProperty, LabeledProperty):
    """A postal address belonging to a contact."""

    contact = models.ForeignKey('Contact', related_name="postal_addresses")
    address1 = models.CharField(_("address line 1"), max_length=127, blank=False)
    address2 = models.CharField(_("address line 2"), max_length=127, blank=True)
    city = models.CharField(_("city"), max_length=127, blank=True)
    state = models.CharField(_("state/province/region"), max_length=127, blank=True)
    country = models.CharField(_("country"), max_length=127)
    postcode = models.CharField(_("postal code/zip code"), max_length=31, blank=True)

    class Meta:
        verbose_name = _("postal address")
        verbose_name_plural = _("postal addresses")

    @property
    def value(self):
        """Single-line, comma-joined rendering of the non-blank parts."""
        parts = (self.address1, self.address2, self.city, self.state,
                 self.country, self.postcode)
        return ", ".join([part for part in parts if part])
class Group(models.Model):
    """A named collection of contacts."""

    name = models.CharField(max_length=200, unique=True)
    description = models.TextField(_("description"), blank=True)
    members = models.ManyToManyField(Contact, verbose_name=_("members"), blank=True)

    class Meta:
        verbose_name = _("group")
        verbose_name_plural = _("groups")

    @property
    def member_list(self):
        """Short preview string of (at most) the first five members."""
        preview = self.members.all()[:5]
        return ', '.join([str(member) for member in preview])

    def __unicode__(self):
        return self.name
class NamedProperty(models.Model):
    """Abstract mixin adding a required name to a property model."""
    name = models.CharField(_("name"), max_length=200)

    class Meta:
        abstract = True

    def __unicode__(self):
        # `self.value` is expected to be supplied by the concrete model.
        return u'%s: %s' % (self.name, self.value)
class OptionalNamedProperty(models.Model):
    """Abstract mixin adding an optional name to a property model."""

    name = models.CharField(_("name"), max_length=200, blank=True)

    class Meta:
        abstract = True

    def __unicode__(self):
        # Prefix the value with "name: " only when a name was given.
        prefix = u'%s: ' % self.name if self.name else u''
        return u'%s%s' % (prefix, self.value)
Meta: verbose_name = _(\"postal address\") verbose_name_plural = _(\"postal addresses\") @property", "= models.DateTimeField(auto_now_add=True, editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class Meta: verbose_name = _(\"contact\") verbose_name_plural", "None: self.is_primary = True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes # Abstacts out", "super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes # Abstacts out common fields and methods,", "self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value", "= PrimaryPropertyDescriptor('postal_addresses') @property def address(self): return self.postal_address class Group(models.Model): name = models.CharField(max_length=200, unique=True)", "_('work')), ('other', _('other')), ) IM_SERVICES = ( ('google', _('Google Talk')), ('aim', _('AIM')), ('yahoo',", "def value(self): data = [self.address1, self.address2, self.city, self.state, self.country, self.postcode] return \", \".join([i", "classes # Every contact property must inherit from either ContactProperty or # PrimaryPropery", "either ContactProperty or # PrimaryPropery class ContactProperty(models.Model): class Meta: abstract = True def", "u'%s%s [%s]' % (self.name and \"%s: \" % self.name or \"\", self.value, PhoneNumber.get_label_display(self))", "Meta: verbose_name = _(\"phone number\") verbose_name_plural = _(\"phone numbers\") def __unicode__(self): return u'%s%s", "# Mixin classes # Abstacts out common fields and methods, models can implement", "@property def address(self): return self.postal_address class Group(models.Model): name = models.CharField(max_length=200, unique=True) description =", "IM_SERVICES = ( ('google', _('Google Talk')), ('aim', _('AIM')), ('yahoo', _('Yahoo')), ('msn', _('MSN')), ('icq',", "max_length=127, blank=True) state = 
models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode =", "related_name=\"email_addresses\") value = models.EmailField(_(\"address\")) class Meta: verbose_name = _(\"email address\") verbose_name_plural = _(\"email", "models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) date_updated =", "except ObjectDoesNotExist: existing = None if self.is_primary: if existing is not None: existing.is_primary", "ContactProperty(models.Model): class Meta: abstract = True def save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args,", "models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200) class Meta: verbose_name = _(\"IM account\")", "company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def address(self): return", "existing.save(update_primary=False) elif existing is None: self.is_primary = True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin", "models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200) class", "_('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try: return self.get_queryset().get(is_primary=True)", "Contact(models.Model): \"\"\" A person or company. 
class Contact(models.Model):
    """
    A person or company.
    """
    name = models.CharField(max_length=200)
    is_company = models.BooleanField(_("company"), default=False)
    photo = models.ImageField(_("photo"), upload_to='var/addressbook/photos', blank=True)
    notes = models.TextField(_("notes"), blank=True)
    # Auto-managed timestamps; sibling property models re-save the contact
    # on their own save, presumably to refresh date_updated -- TODO confirm.
    date_created = models.DateTimeField(auto_now_add=True, editable=False)
    date_updated = models.DateTimeField(auto_now=True, editable=False)

    class Meta:
        verbose_name = _("contact")
        verbose_name_plural = _("contacts")
        ordering = ('name',)

    def __unicode__(self):
        return self.name

    # primary contact properies -- each descriptor exposes the primary row
    # of the named related collection.
    email_address = PrimaryPropertyDescriptor('email_addresses')
    im_account = PrimaryPropertyDescriptor('im_accounts')
    company = PrimaryPropertyDescriptor('organizations')
    phone_number = PrimaryPropertyDescriptor('phone_numbers')
    postal_address = PrimaryPropertyDescriptor('postal_addresses')

    @property
    def address(self):
        # Convenience alias for the primary postal address.
        return self.postal_address
class Link(ContactProperty, NamedProperty):
    """A named URL attached to a contact."""
    contact = models.ForeignKey('Contact', related_name="links")
    value = models.URLField(_('URL'), max_length=200, default='http://')

    class Meta:
        verbose_name = _("link")
        verbose_name_plural = _("links")

    def save(self, *args, **kwargs):
        # Skip persisting a link whose URL is still the untouched
        # 'http://' placeholder default.
        if self.value == 'http://':
            return
        super(Link, self).save(*args, **kwargs)
class PrimaryPropertyDescriptor(object):
    """Descriptor exposing the primary item of a contact's property set.

    Reading returns the primary row (or None); assigning marks the value
    primary and adds it to the collection; deleting removes the current
    primary and promotes one remaining row in its place.
    """

    def __init__(self, collection_name):
        self.collection_name = collection_name

    def get_collection(self, instance):
        """Return the related collection/manager on `instance`."""
        return getattr(instance, self.collection_name)

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return self.get_collection(instance).primary()

    def __set__(self, instance, value):
        value.is_primary = True
        self.get_collection(instance).add(value)

    def __delete__(self, instance):
        collection = self.get_collection(instance)
        primary = collection.primary()
        # Bug fix: primary() returns None when no row is flagged; guard
        # instead of raising AttributeError on None.delete().
        if primary is not None:
            primary.delete()
        for obj in collection.all():
            obj.is_primary = True
            # Bug fix: the promotion was set in memory but never persisted;
            # save the newly promoted row so the flag survives.
            obj.save()
            return
class Organization(PrimaryProperty):
    """A company/organization affiliation of a contact."""
    contact = models.ForeignKey('Contact', related_name="organizations")
    name = models.CharField(_("name"), max_length=200)
    title = models.CharField(_("title"), max_length=200, blank=True)

    class Meta:
        verbose_name = _("organization")
        verbose_name_plural = _("organizations")

    def __unicode__(self):
        return self.name
class ContactProperty(models.Model):
    """Abstract base class for every property row attached to a Contact."""

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Re-save the owning contact first (presumably to refresh its
        # auto_now date_updated stamp -- TODO confirm), then persist this row.
        self.contact.save()
        models.Model.save(self, *args, **kwargs)
models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user", "company. \"\"\" name = models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos',", "# Abstacts out common fields and methods, models can implement this for #", "self.get_collection(instance).primary() def __set__(self, instance, value): value.is_primary = True self.get_collection(instance).add(value) def __delete__(self, instance): self.get_collection(instance).primary().delete()", "contact = models.ForeignKey('Contact', related_name=\"links\") value = models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name =", "description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name =", "common fields and methods, models can implement this for # themselves if different.", "\", \".join([i for i in data if i]) class Contact(models.Model): \"\"\" A person", "class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\") value = models.URLField(_('URL'), max_length=200, default='http://') class", "OptionalNamedProperty): PHONE_NUM_LABELS = ( ('landline', _('landline')), ('mobile', _('mobile')), ('fax', _('fax')), ) contact =", "LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value = models.EmailField(_(\"address\")) class Meta: verbose_name =", "Meta: verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',) def __unicode__(self): return", "models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s' % (self.name, 
self.value)", "verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',) def __unicode__(self): return self.name", "NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s'", "class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS = ( ('landline', _('landline')), ('mobile', _('mobile')), ('fax', _('fax')), )", "import ObjectDoesNotExist from django.db import models PROPERTY_LABELS = ( ('home', _('home')), ('work', _('work')),", "= True def __unicode__(self): return u'%s%s' % (self.name and '%s: ' % self.name", "data = [self.address1, self.address2, self.city, self.state, self.country, self.postcode] return \", \".join([i for i", "existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing = None if self.is_primary:", "def __set__(self, instance, value): value.is_primary = True self.get_collection(instance).add(value) def __delete__(self, instance): self.get_collection(instance).primary().delete() for", "= kwargs.pop('update_primary', True) if update_primary: try: existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except", "abstract = True def __unicode__(self): return u'%s%s' % (self.name and '%s: ' %", "= models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200) class Meta: verbose_name = _(\"IM", "as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db import", "if update_primary: try: existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing =", "related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title = 
models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name", "= models.CharField(_(\"name\"), max_length=200) class Meta: abstract = True def __unicode__(self): return u'%s: %s'", "ObjectDoesNotExist from django.db import models PROPERTY_LABELS = ( ('home', _('home')), ('work', _('work')), ('other',", "self.get_collection(instance).add(value) def __delete__(self, instance): self.get_collection(instance).primary().delete() for obj in self.get_collection(instance).all(): obj.is_primary = True return", "i]) class Contact(models.Model): \"\"\" A person or company. \"\"\" name = models.CharField(max_length=200) is_company", "(self.name and '%s: ' % self.name or \"\", self.value) # Contact properties class", "existing is None: self.is_primary = True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes #", "return getattr(instance, self.collection_name) def __get__(self, instance, owner): if instance is None: return self", "return u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\")", "('landline', _('landline')), ('mobile', _('mobile')), ('fax', _('fax')), ) contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label =", "( ('landline', _('landline')), ('mobile', _('mobile')), ('fax', _('fax')), ) contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label", "primary(self): try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None # Base classes # Every", "self.value) # Contact properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def", "save(self, *args, **kwargs): update_primary = kwargs.pop('update_primary', True) if update_primary: try: existing = self.__class__.objects.exclude(pk=self.id)", "abstract = True def save(self, *args, **kwargs): update_primary = kwargs.pop('update_primary', True) if 
update_primary:", "= _(\"phone numbers\") def __unicode__(self): return u'%s%s [%s]' % (self.name and \"%s: \"", "('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return", "value = models.CharField(_('number'), max_length=100) class Meta: verbose_name = _(\"phone number\") verbose_name_plural = _(\"phone", "def __unicode__(self): return u'%s: %s' % (self.name, self.value) class OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"),", "models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural", "_(\"phone number\") verbose_name_plural = _(\"phone numbers\") def __unicode__(self): return u'%s%s [%s]' % (self.name", "\"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address", "class Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name class", "return u'%s: %s' % (self.name, self.value) class OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200, blank=True)", "= models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s' % (self.name,", "verbose_name_plural = _(\"links\") def save(self, *args, **kwargs): if self.value == 'http://': return super(Link,", "in self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\")", "= self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except 
ObjectDoesNotExist: existing = None if self.is_primary: if", "= _(\"date\") verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\")", "ObjectDoesNotExist: existing = None if self.is_primary: if existing is not None: existing.is_primary =", "__init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance): return getattr(instance, self.collection_name) def __get__(self,", "('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try: return", "def save(self, *args, **kwargs): if self.value == 'http://': return super(Link, self).save(*args, **kwargs) class", "blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True) class", "[self.address1, self.address2, self.city, self.state, self.country, self.postcode] return \", \".join([i for i in data", "default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True,", "= _(\"contact\") verbose_name_plural = _(\"contacts\") ordering = ('name',) def __unicode__(self): return self.name #", "verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural = _(\"groups\") @property def member_list(self):", "models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class Meta: verbose_name", "address(self): return self.postal_address class Group(models.Model): name = models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True)", "= 
models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def", "except ObjectDoesNotExist: return None # Base classes # Every contact property must inherit", "return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200) class", "('home', _('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES = ( ('google', _('Google Talk')),", "properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers')", "= models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class Meta:", "self.value) class OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200, blank=True) class Meta: abstract = True", "im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property", "return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS = ( ('landline', _('landline')), ('mobile', _('mobile')), ('fax',", "related_name=\"links\") value = models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name = _(\"link\") verbose_name_plural =", "max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True)", "return self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\") value = 
models.URLField(_('URL'), max_length=200,", "get_collection(self, instance): return getattr(instance, self.collection_name) def __get__(self, instance, owner): if instance is None:", "title = models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\")", "__set__(self, instance, value): value.is_primary = True self.get_collection(instance).add(value) def __delete__(self, instance): self.get_collection(instance).primary().delete() for obj", "this for # themselves if different. class LabeledProperty(models.Model): label = models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS)", "= PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def address(self): return self.postal_address", "members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural = _(\"groups\")", "class Meta: verbose_name = _(\"date\") verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact", "= models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class", "models.CharField(_(\"name\"), max_length=200) class Meta: abstract = True def __unicode__(self): return u'%s: %s' %", "verbose_name = _(\"phone number\") verbose_name_plural = _(\"phone numbers\") def __unicode__(self): return u'%s%s [%s]'", "update_primary: try: existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing = None", "max_length=127, blank=True) city = models.CharField(_(\"city\"), max_length=127, blank=True) state = 
models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country", "django.core.exceptions import ObjectDoesNotExist from django.db import models PROPERTY_LABELS = ( ('home', _('home')), ('work',", "_(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value = models.EmailField(_(\"address\")) class", "update_primary = kwargs.pop('update_primary', True) if update_primary: try: existing = self.__class__.objects.exclude(pk=self.id) \\ .filter(contact=self.contact, is_primary=True).get()", "Contact properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance):", "classes # Abstacts out common fields and methods, models can implement this for", "(self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value = models.DateField(_(\"date\")) class", "existing = None if self.is_primary: if existing is not None: existing.is_primary = False", "= models.ForeignKey('Contact', related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"), max_length=200, blank=True) class", "_(\"IM account\") verbose_name_plural = _(\"IM accounts\") @property def value(self): return self.account class Link(ContactProperty,", "PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False) objects = PrimaryPropertyManager() class Meta: abstract = True", "models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name = _(\"link\") verbose_name_plural = _(\"links\") def save(self,", "('aim', _('AIM')), ('yahoo', _('Yahoo')), ('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager):", "is_primary=True).get() except ObjectDoesNotExist: 
existing = None if self.is_primary: if existing is not None:", "_('fax')), ) contact = models.ForeignKey('Contact', related_name=\"phone_numbers\") label = models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value =", "Organization(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"), max_length=200,", "= models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty):", "self return self.get_collection(instance).primary() def __set__(self, instance, value): value.is_primary = True self.get_collection(instance).add(value) def __delete__(self,", "models.CharField(_(\"address line 1\"), max_length=127, blank=False) address2 = models.CharField(_(\"address line 2\"), max_length=127, blank=True) city", "value(self): return self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\") value = models.URLField(_('URL'),", "('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try: return self.get_queryset().get(is_primary=True) except", "_(\"email address\") verbose_name_plural = _(\"email addresses\") class IMAccount(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"im_accounts\") service", "models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"), blank=True) date_created =", "PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance): return getattr(instance, self.collection_name)", "default=False) objects = PrimaryPropertyManager() class Meta: abstract = True def save(self, *args, **kwargs):", "contact = 
models.ForeignKey('Contact', related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"), max_length=200, blank=True)", "= _(\"groups\") @property def member_list(self): return ', '.join([str(c) for c in self.members.all()[:5]]) def", "out common fields and methods, models can implement this for # themselves if", "class Meta: abstract = True def __unicode__(self): return u'%s%s' % (self.name and '%s:", "PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact = models.ForeignKey('Contact', related_name=\"postal_addresses\") address1 = models.CharField(_(\"address line 1\"),", "for # themselves if different. class LabeledProperty(models.Model): label = models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS) class", "_('Yahoo')), ('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self): try:", "postal_address = PrimaryPropertyDescriptor('postal_addresses') @property def address(self): return self.postal_address class Group(models.Model): name = models.CharField(max_length=200,", "blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural =", "Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty,", ".filter(contact=self.contact, is_primary=True).get() except ObjectDoesNotExist: existing = None if self.is_primary: if existing is not", "def member_list(self): return ', '.join([str(c) for c in self.members.all()[:5]]) def __unicode__(self): return self.name", "class Meta: verbose_name = _(\"email address\") verbose_name_plural = _(\"email addresses\") class IMAccount(PrimaryProperty): contact", "return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None # 
Base classes # Every contact property", "ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from django.db", "models.ForeignKey('Contact', related_name=\"phone_numbers\") label = models.CharField(_(\"label\"), max_length=200, choices=PHONE_NUM_LABELS) value = models.CharField(_('number'), max_length=100) class Meta:", "def __unicode__(self): return self.name # primary contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account =", "Meta: verbose_name = _(\"email address\") verbose_name_plural = _(\"email addresses\") class IMAccount(PrimaryProperty): contact =", "# primary contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations')", "verbose_name = _(\"date\") verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact',", "self.value == 'http://': return super(Link, self).save(*args, **kwargs) class Organization(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"organizations\")", "= models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes =", "contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def __unicode__(self): return u'%s: %s' %", "Meta: verbose_name = _(\"postal address\") verbose_name_plural = _(\"postal addresses\") @property def value(self): data", "related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email", "value = models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\") 
verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty,", "class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value = models.DateField(_(\"date\")) class Meta: verbose_name", "Meta: verbose_name = _(\"IM account\") verbose_name_plural = _(\"IM accounts\") @property def value(self): return", "= _(\"email address\") verbose_name_plural = _(\"email addresses\") class IMAccount(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"im_accounts\")", "upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"), blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) date_updated = models.DateTimeField(auto_now=True,", "__delete__(self, instance): self.get_collection(instance).primary().delete() for obj in self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty,", "import models PROPERTY_LABELS = ( ('home', _('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES", "= ( ('home', _('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES = ( ('google',", "accounts\") @property def value(self): return self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\")", "PROPERTY_LABELS = ( ('home', _('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES = (", "\"\", self.value) # Contact properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name", "related_name=\"dates\") value = models.DateField(_(\"date\")) class Meta: verbose_name = _(\"date\") verbose_name_plural = _(\"dates\") class", "self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\") value = models.URLField(_('URL'), max_length=200, default='http://')", "from either ContactProperty or # PrimaryPropery class 
ContactProperty(models.Model): class Meta: abstract = True", "= PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address =", "= models.CharField(_(\"name\"), max_length=200, blank=True) class Meta: abstract = True def __unicode__(self): return u'%s%s'", "models.CharField(_('number'), max_length=100) class Meta: verbose_name = _(\"phone number\") verbose_name_plural = _(\"phone numbers\") def", "max_length=200) title = models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name = _(\"organization\") verbose_name_plural =", "addresses\") @property def value(self): data = [self.address1, self.address2, self.city, self.state, self.country, self.postcode] return", "primary contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number", "[%s]' % (self.name and \"%s: \" % self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class", "obj.is_primary = True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value =", "def __unicode__(self): return u'%s: %s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact =", "blank=True) city = models.CharField(_(\"city\"), max_length=127, blank=True) state = models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country =", "= _(\"IM account\") verbose_name_plural = _(\"IM accounts\") @property def value(self): return self.account class", "line 2\"), max_length=127, blank=True) city = models.CharField(_(\"city\"), max_length=127, blank=True) state = models.CharField(_(\"state/province/region\"), max_length=127,", "default='http://') class Meta: verbose_name = _(\"link\") 
verbose_name_plural = _(\"links\") def save(self, *args, **kwargs):", "name = models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name =", "blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) date_updated = models.DateTimeField(auto_now=True, editable=False) class Meta: verbose_name =", ") class PrimaryPropertyManager(models.Manager): def primary(self): try: return self.get_queryset().get(is_primary=True) except ObjectDoesNotExist: return None #", "= _(\"email addresses\") class IMAccount(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30,", "= models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"), upload_to='var/addressbook/photos', blank=True) notes = models.TextField(_(\"notes\"), blank=True) date_created", "class Group(models.Model): name = models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact,", "_(\"postal address\") verbose_name_plural = _(\"postal addresses\") @property def value(self): data = [self.address1, self.address2,", "email address\"), max_length=200) class Meta: verbose_name = _(\"IM account\") verbose_name_plural = _(\"IM accounts\")", "%s' % (self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value =", "models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or", "Meta: abstract = True def save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class", "' % self.name or \"\", self.value) # Contact properties class PrimaryPropertyDescriptor(object): def 
__init__(self,", "% (self.name, self.value) class Date(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"dates\") value = models.DateField(_(\"date\"))", "value = models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name = _(\"link\") verbose_name_plural = _(\"links\")", "_(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty): PHONE_NUM_LABELS =", "max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200) class Meta:", "models.CharField(_(\"city\"), max_length=127, blank=True) state = models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode", "and \"%s: \" % self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact", "ordering = ('name',) def __unicode__(self): return self.name # primary contact properies email_address =", "self.collection_name) def __get__(self, instance, owner): if instance is None: return self return self.get_collection(instance).primary()", "choices=PHONE_NUM_LABELS) value = models.CharField(_('number'), max_length=100) class Meta: verbose_name = _(\"phone number\") verbose_name_plural =", "return u'%s%s [%s]' % (self.name and \"%s: \" % self.name or \"\", self.value,", "u'%s: %s' % (self.name, self.value) class OptionalNamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200, blank=True) class", "Meta: abstract = True def save(self, *args, **kwargs): update_primary = kwargs.pop('update_primary', True) if", "elif existing is None: self.is_primary = True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes", "blank=False) address2 = models.CharField(_(\"address line 2\"), max_length=127, blank=True) city 
= models.CharField(_(\"city\"), max_length=127, blank=True)", "max_length=127, blank=False) address2 = models.CharField(_(\"address line 2\"), max_length=127, blank=True) city = models.CharField(_(\"city\"), max_length=127,", "class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self, instance): return getattr(instance,", "name = models.CharField(max_length=200, unique=True) description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True)", "blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural = _(\"groups\") @property def member_list(self): return", "obj in self.get_collection(instance).all(): obj.is_primary = True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact',", "instance, owner): if instance is None: return self return self.get_collection(instance).primary() def __set__(self, instance,", "unique=True) description = models.TextField(_(\"description\"), blank=True) members = models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name", "= True return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\"))", "abstract = True def save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class PrimaryProperty(ContactProperty):", "**kwargs) # Mixin classes # Abstacts out common fields and methods, models can", "= models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS) class Meta: abstract = True def __unicode__(self): return u'%s", "class Meta: verbose_name = _(\"IM account\") verbose_name_plural = _(\"IM accounts\") @property def value(self):", "verbose_name_plural = _(\"contacts\") ordering = ('name',) def __unicode__(self): return self.name # 
primary contact", "# Contact properties class PrimaryPropertyDescriptor(object): def __init__(self, collection_name): self.collection_name = collection_name def get_collection(self,", "models.EmailField(_(\"address\")) class Meta: verbose_name = _(\"email address\") verbose_name_plural = _(\"email addresses\") class IMAccount(PrimaryProperty):", "= _(\"contacts\") ordering = ('name',) def __unicode__(self): return self.name # primary contact properies", "self.collection_name = collection_name def get_collection(self, instance): return getattr(instance, self.collection_name) def __get__(self, instance, owner):", "**kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class PrimaryProperty(ContactProperty): is_primary = models.BooleanField(_(\"primary\"), default=False) objects =", "import ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions import ObjectDoesNotExist from", "self.is_primary = True super(PrimaryProperty, self).save(*args, **kwargs) # Mixin classes # Abstacts out common", "collection_name def get_collection(self, instance): return getattr(instance, self.collection_name) def __get__(self, instance, owner): if instance", "IMAccount(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"),", "_('AIM')), ('yahoo', _('Yahoo')), ('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def", "verbose_name_plural = _(\"dates\") class EmailAddress(PrimaryProperty, LabeledProperty, OptionalNamedProperty): contact = models.ForeignKey('Contact', related_name=\"email_addresses\") value =", "@property def value(self): return self.account class Link(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"links\") value", "= models.CharField(_(\"country\"), 
max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True) class Meta: verbose_name", "self.postcode] return \", \".join([i for i in data if i]) class Contact(models.Model): \"\"\"", "<filename>addressbook/models.py from django.utils.translation import ugettext_lazy as _ from django.contrib.contenttypes.models import ContentType from django.core.exceptions", "LabeledProperty(models.Model): label = models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS) class Meta: abstract = True def __unicode__(self):", "max_length=200, blank=True) class Meta: abstract = True def __unicode__(self): return u'%s%s' % (self.name", "return class CustomField(ContactProperty, NamedProperty): contact = models.ForeignKey('Contact', related_name=\"custom_fields\") value = models.TextField(_(\"value\")) def __unicode__(self):", "True def __unicode__(self): return u'%s%s' % (self.name and '%s: ' % self.name or", "class Organization(PrimaryProperty): contact = models.ForeignKey('Contact', related_name=\"organizations\") name = models.CharField(_(\"name\"), max_length=200) title = models.CharField(_(\"title\"),", "= models.DateTimeField(auto_now=True, editable=False) class Meta: verbose_name = _(\"contact\") verbose_name_plural = _(\"contacts\") ordering =", "= models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural = _(\"groups\") @property", "None # Base classes # Every contact property must inherit from either ContactProperty", "person or company. 
\"\"\" name = models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo =", "= _(\"phone number\") verbose_name_plural = _(\"phone numbers\") def __unicode__(self): return u'%s%s [%s]' %", "state = models.CharField(_(\"state/province/region\"), max_length=127, blank=True) country = models.CharField(_(\"country\"), max_length=127) postcode = models.CharField(_(\"postal code/zip", "max_length=127) postcode = models.CharField(_(\"postal code/zip code\"), max_length=31, blank=True) class Meta: verbose_name = _(\"postal", "blank=True) class Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name", "email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number = PrimaryPropertyDescriptor('phone_numbers') postal_address", "= models.URLField(_('URL'), max_length=200, default='http://') class Meta: verbose_name = _(\"link\") verbose_name_plural = _(\"links\") def", "contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company = PrimaryPropertyDescriptor('organizations') phone_number =", "is_primary = models.BooleanField(_(\"primary\"), default=False) objects = PrimaryPropertyManager() class Meta: abstract = True def", "or company. 
\"\"\" name = models.CharField(max_length=200) is_company = models.BooleanField(_(\"company\"), default=False) photo = models.ImageField(_(\"photo\"),", "verbose_name = _(\"link\") verbose_name_plural = _(\"links\") def save(self, *args, **kwargs): if self.value ==", "\"%s: \" % self.name or \"\", self.value, PhoneNumber.get_label_display(self)) class PostalAddress(PrimaryProperty, LabeledProperty): contact =", "True def save(self, *args, **kwargs): self.contact.save() models.Model.save(self, *args, **kwargs) class PrimaryProperty(ContactProperty): is_primary =", "property must inherit from either ContactProperty or # PrimaryPropery class ContactProperty(models.Model): class Meta:", "= _(\"postal address\") verbose_name_plural = _(\"postal addresses\") @property def value(self): data = [self.address1,", "help_text=_(\"user name or email address\"), max_length=200) class Meta: verbose_name = _(\"IM account\") verbose_name_plural", "models.CharField(_(\"title\"), max_length=200, blank=True) class Meta: verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self):", "= models.ForeignKey('Contact', related_name=\"im_accounts\") service = models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name", "verbose_name = _(\"organization\") verbose_name_plural = _(\"organizations\") def __unicode__(self): return self.name class PhoneNumber(PrimaryProperty, OptionalNamedProperty):", "def __unicode__(self): return u'%s%s' % (self.name and '%s: ' % self.name or \"\",", "__unicode__(self): return u'%s [%s]' % (self.value, LabeledProperty.get_label_display(self)) class NamedProperty(models.Model): name = models.CharField(_(\"name\"), max_length=200)", "= models.CharField(_(\"service\"), max_length=30, choices=IM_SERVICES) account = models.CharField(_(\"account\"), help_text=_(\"user name or email address\"), max_length=200)", "= models.CharField(_(\"postal code/zip 
code\"), max_length=31, blank=True) class Meta: verbose_name = _(\"postal address\") verbose_name_plural", "def get_collection(self, instance): return getattr(instance, self.collection_name) def __get__(self, instance, owner): if instance is", "( ('home', _('home')), ('work', _('work')), ('other', _('other')), ) IM_SERVICES = ( ('google', _('Google", "name or email address\"), max_length=200) class Meta: verbose_name = _(\"IM account\") verbose_name_plural =", "return self.name # primary contact properies email_address = PrimaryPropertyDescriptor('email_addresses') im_account = PrimaryPropertyDescriptor('im_accounts') company", "models.ManyToManyField(Contact, verbose_name=_(\"members\"), blank=True) class Meta: verbose_name = _(\"group\") verbose_name_plural = _(\"groups\") @property def", "value = models.EmailField(_(\"address\")) class Meta: verbose_name = _(\"email address\") verbose_name_plural = _(\"email addresses\")", "class Meta: verbose_name = _(\"phone number\") verbose_name_plural = _(\"phone numbers\") def __unicode__(self): return", "('yahoo', _('Yahoo')), ('msn', _('MSN')), ('icq', _('ICQ')), ('jabber', _('Jabber')), ) class PrimaryPropertyManager(models.Manager): def primary(self):", "class LabeledProperty(models.Model): label = models.CharField(_(\"label\"), max_length=200, choices=PROPERTY_LABELS) class Meta: abstract = True def", "*args, **kwargs): update_primary = kwargs.pop('update_primary', True) if update_primary: try: existing = self.__class__.objects.exclude(pk=self.id) \\", "implement this for # themselves if different. class LabeledProperty(models.Model): label = models.CharField(_(\"label\"), max_length=200," ]
[ "target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH =", "parameter for linear classifier to get CAVs. alphas = [0.1] target = 'cat'", "= './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir = working_dir + '/cavs/' source_dir", "\"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel,", "import cav as cav import model as model import tcav as tcav import", "import tensorflow as tf working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir", "as tcav import utils as utils import utils_plot as utils_plot # utils_plot requires", "= \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer", "utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter for linear classifier", "mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp) results = mytcav.run() utils_plot.plot_results(results,", "utils_plot as utils_plot # utils_plot requires matplotlib import os import torch import activation_generator", "= ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter for", "import os import torch import activation_generator as act_gen import tensorflow as tf working_dir", "source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a", "random500_1) mytcav = tcav.TCAV(target, concepts, 
bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp) results = mytcav.run()", "cav as cav import model as model import tcav as tcav import utils", "get CAVs. alphas = [0.1] target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"]", "[0.1] target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH", "torch import activation_generator as act_gen import tensorflow as tf working_dir = './tcav_class_test' activation_dir", "activation_generator as act_gen import tensorflow as tf working_dir = './tcav_class_test' activation_dir = working_dir", "mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30", "folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp) results", "'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100)", "(random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp) results =", "# this is a regularizer penalty parameter for linear classifier to get CAVs.", "= [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH)", "penalty parameter for linear classifier to get CAVs. 
alphas = [0.1] target =", "'./data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp =", "model import tcav as tcav import utils as utils import utils_plot as utils_plot", "utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter for linear classifier to get", "'/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is", "CAVs. alphas = [0.1] target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart", "alphas = [0.1] target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart =", "# folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp)", "to get CAVs. 
alphas = [0.1] target = 'cat' concepts = [\"dotted\", \"striped\",", "as act_gen import tensorflow as tf working_dir = './tcav_class_test' activation_dir = working_dir +", "act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0,", "matplotlib import os import torch import activation_generator as act_gen import tensorflow as tf", "source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1) mytcav =", "import torch import activation_generator as act_gen import tensorflow as tf working_dir = './tcav_class_test'", "as utils import utils_plot as utils_plot # utils_plot requires matplotlib import os import", "+ '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this", "\"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator =", "requires matplotlib import os import torch import activation_generator as act_gen import tensorflow as", "utils import utils_plot as utils_plot # utils_plot requires matplotlib import os import torch", "working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir = working_dir + '/cavs/'", "= working_dir + '/activations/' cav_dir = working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks", "[\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator", "tcav as tcav import utils as utils import utils_plot as utils_plot # utils_plot", "\"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) 
utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty", "working_dir + '/activations/' cav_dir = working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks =", "= 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir,", "utils_plot requires matplotlib import os import torch import activation_generator as act_gen import tensorflow", "['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter for linear", "= 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt'", "bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter", "a regularizer penalty parameter for linear classifier to get CAVs. 
alphas = [0.1]", "= 30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas,", "'./tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir = working_dir + '/cavs/' source_dir =", "activation_dir = working_dir + '/activations/' cav_dir = working_dir + '/cavs/' source_dir = \"./data/\"", "as cav import model as model import tcav as tcav import utils as", "= tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir, num_random_exp=num_random_exp) results = mytcav.run() utils_plot.plot_results(results, num_random_exp=num_random_exp)", "model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders", "num_random_exp = 30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator,", "cav import model as model import tcav as tcav import utils as utils", "30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks, act_generator, alphas, cav_dir=cav_dir,", "max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts,", "concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel =", "import activation_generator as act_gen import tensorflow as tf working_dir = './tcav_class_test' activation_dir =", "classifier to get CAVs. 
alphas = [0.1] target = 'cat' concepts = [\"dotted\",", "tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target, concepts, bottlenecks,", "activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1) mytcav = tcav.TCAV(target,", "regularizer penalty parameter for linear classifier to get CAVs. alphas = [0.1] target", "working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) #", "tf working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir = working_dir +", "import utils as utils import utils_plot as utils_plot # utils_plot requires matplotlib import", "act_gen import tensorflow as tf working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/'", "import tcav as tcav import utils as utils import utils_plot as utils_plot #", "import model as model import tcav as tcav import utils as utils import", "# utils_plot requires matplotlib import os import torch import activation_generator as act_gen import", "= model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 #", "utils as utils import utils_plot as utils_plot # utils_plot requires matplotlib import os", "LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0)", "'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel", "model as model import tcav as tcav import utils as utils 
import utils_plot", "for linear classifier to get CAVs. alphas = [0.1] target = 'cat' concepts", "= working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir)", "is a regularizer penalty parameter for linear classifier to get CAVs. alphas =", "act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1) mytcav", "this is a regularizer penalty parameter for linear classifier to get CAVs. alphas", "= [0.1] target = 'cat' concepts = [\"dotted\", \"striped\", \"zigzagged\"] random_counterpart = 'random500_1'", "as model import tcav as tcav import utils as utils import utils_plot as", "utils_plot # utils_plot requires matplotlib import os import torch import activation_generator as act_gen", "+ '/activations/' cav_dir = working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2']", "= act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp = 30 # folders (random500_0, random500_1)", "as tf working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir = working_dir", "tcav import utils as utils import utils_plot as utils_plot # utils_plot requires matplotlib", "os import torch import activation_generator as act_gen import tensorflow as tf working_dir =", "utils.make_dir_if_not_exists(working_dir) utils.make_dir_if_not_exists(cav_dir) # this is a regularizer penalty parameter for linear classifier to", "linear classifier to get CAVs. 
alphas = [0.1] target = 'cat' concepts =", "= './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir, activation_dir, max_examples=100) tf.compat.v1.logging.set_verbosity(0) num_random_exp", "'/activations/' cav_dir = working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir)", "as utils_plot # utils_plot requires matplotlib import os import torch import activation_generator as", "import utils_plot as utils_plot # utils_plot requires matplotlib import os import torch import", "random_counterpart = 'random500_1' LABEL_PATH = './data/imagenet_comp_graph_label_strings.txt' mymodel = model.CNNWrapper(LABEL_PATH) act_generator = act_gen.ImageActivationGenerator(mymodel, source_dir,", "tensorflow as tf working_dir = './tcav_class_test' activation_dir = working_dir + '/activations/' cav_dir =", "cav_dir = working_dir + '/cavs/' source_dir = \"./data/\" bottlenecks = ['conv2'] utils.make_dir_if_not_exists(activation_dir) utils.make_dir_if_not_exists(working_dir)" ]
[ "executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if", "= self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData", "= Job.STATE.FAILED result = False ## set job finished if self.jobId: self.backend.post_job(backendJobData) return", "installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add", "raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version = None if 'version' in", "bundleId %s: %s' % (bundleId, app)) if app and '_id' in app: self.appId", "def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def", "@classmethod def job_from_dict(cls, jobDict, backend, device): job = None if 'type' in jobDict:", "bundleId = jobInfo['bundleId'] version = None if 'version' in jobInfo: version = jobInfo['version']", "InstallAppJobs to exist/run without a corresponding backendJob backendJobData = {} if self.jobId: backendJobData", "installation failed\") ## add app to backend ### the app data is currently", "% bundleId) self.device.uninstall(bundleId) # # save the results and install the app if", "logger.debug('app is installed now') self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving", "self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except Exception, e: raise JobExecutionError('unable to", "bundleId given') bundleId = jobInfo['bundleId'] version = None if 'version' in jobInfo: version", "logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running',", "__init__(self, backend, device, jobDict): 
super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self,", "= None if 'type' in jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP:", "= jobDict if not '_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId =", "before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo", "= jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device is running", "to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except", "check for ipa-size < ~50MB if app and 'fileSizeBytes' in app: size =", "= self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since", "appData['trackName'] self.appId = self.backend.post_app(appData) # end install via appstore return not alreadyInstalled elif", "pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ## add app", "self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo']", "jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device = device self.backend =", "self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return True else: logging.warning('installing", "'%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId,", "device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType", "app archiving since device is running iOS 9 or later\") else: logger.debug(\"check 
if", "registered with backend if self.appId and alreadyInstalled: # app is installed and registred", "app %s' % bundleId) # use device data due to better version data", "execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set", "jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier']", "return True else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s on", "appstore app %s' % bundleId) # use device data due to better version", "self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True", "install app') raise JobExecutionError('Unable to install app') logger.debug('app is installed now') self.appId =", "return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app", "if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from", "u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict =", "else: logging.warning('installing the app via device handler failed! 
- Install via AppStore instead')", "to avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping install from backend to", "elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD:", "'%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId,", "%s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk", "self.appId and alreadyInstalled: # app is installed and registred with backend logger.info('App is", "self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable", "for matching version number if version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion:", "to install the app will raise a JobExecutionError on failure returns: True if", "= Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise", "number if version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong app", "logger.info('installing app %s via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App", "app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save the results and install the", "store = AppStore(storeCountry) trackId = 0 appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId)", "TypeError: logger.error('Unable to encode app archive!') #delete app archive from disk os.remove(appPath) if", "''' try to install the app returns: True if the app was just", "not 'appType' in jobInfo: raise JobExecutionError('no app type given') if not 'bundleId' in", "elif jobType == Job.TYPE.EXEC_CMD: job = 
ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not", "app type') def execute(self): logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId, self.device))", "device, jobDict) self.appId = None def _install_app(self, pilot): ''' try to install the", "Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo", "error (unknown ipa size)') # case 3: install from appstore # case 4:", "appPath) os.remove(appPath) #delete app from disk except Exception, e: raise JobExecutionError('unable to archive", "< 40000000: # actually install from backend logger.info('installing app %s from backend (size:", "is currently taken from ideviceinstaller (via device.installed_apps) ### alternatively the pilot could be", "to encode app archive!') #delete app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId)", "in installedApps: logger.debug('app %s is already installed' % bundleId) # check for matching", "installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save the results and", "'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return True", "% (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except Exception, e:", "% bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation", "installed and registred with backend <%s>' % self.appId) return False # case 2:", "None def _install_app(self, pilot): ''' try to install the app returns: True if", "logger.debug('app %s is already installed' % bundleId) # check for matching version number", "not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) 
backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\"", "= self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in", "self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ##", "for ipa-size < ~50MB if app and 'fileSizeBytes' in app: size = 0", "self.appId = self.backend.post_app(appData) # end install via appstore return not alreadyInstalled elif 'CydiaApp'", "False if the app was already installed before ''' logger.debug('_installApp') installJobDict = {", "from backend elif self.appId: # install from backend # dirty check for ipa-size", "def job_from_dict(cls, jobDict, backend, device): job = None if 'type' in jobDict: jobType", "class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId", "trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get", "jobDict['command'] def execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else:", "accountId = acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to find a valid", "install from backend to avoid ideviceinstaller error (unknown ipa size)') # case 3:", "%s' % bundleId) # use device data due to better version data installedApps", "backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process =", "jobDict, backend, device): job = None if 'type' in jobDict: jobType = jobDict['type']", "the results and install the app if not previously installed # self._save_run_results(runId, bundleId,", 
"failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job", "## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone", "appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' % bundleId)", "bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e:", "= 'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch", "ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution", "{ '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob')", "'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend runId", "jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType ==", "return True class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict)", "executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, }", "0 and bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in", "logger.debug('fetch app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s", "self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '': raise", 
"version installed!') # the app is already installed and versions are compatible alreadyInstalled", "bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\")", "self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend,", "% (bundleId, app)) if app and '_id' in app: self.appId = app['_id'] #", "bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read() f.close()", "only if not alreadyInstalled: # install via appstore logger.info('installing app %s via appstore'", "bundleId) # use device data due to better version data installedApps = self.device.installed_apps()", "logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo']", "self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run')", "in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend runId =", "appstore # case 4: installed but unregistred storeCountry = 'de' if 'storeCountry' in", "jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId) # use device data due to", "set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True", "os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on device %s\" %", "app is already installed and versions are compatible alreadyInstalled = True # check", "# case 3 only if not alreadyInstalled: # install via appstore 
logger.info('installing app", "jobInfo['appType']: logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise", "in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return True else:", "appInfo logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo =", "is already installed' % bundleId) # check for matching version number if version:", "logger.error('jobDict does not contain a type!') if job: logger.info('job created: %s' % str(job))", "Exception, e: raise JobExecutionError('unable to archive app binary: %s' % str(e)) def _install_app(self,", "return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia' %", "= None if 'version' in jobInfo: version = jobInfo['version'] #check app type if", "raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] >", "if version != installedVersion: raise JobExecutionError('wrong app version installed!') # the app is", "was just installed False if the app was already installed before ''' logger.debug('_install_app')", "__init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def _install_app(self,", "app will raise a JobExecutionError on failure returns: True if the app was", "app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from", "JobExecutionError on failure returns: True if the app was just installed False if", "self.device.installed_apps() # check if app already installed alreadyInstalled = False if bundleId in", "logger.debug(\"skipping app archiving since device is running iOS 9 or later\") else: if", "self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in 
jobInfo:", "JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8:", "backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state']", "archiving since device is running iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId):", "Install via AppStore instead') else: logger.warning('unable to get app archive from backend. appId:", "jobDict['_id'] self.device = device self.backend = backend def execute(self): raise NotImplementedError class InstallAppJob(Job):", "(self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath)", "appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except Exception, e: raise JobExecutionError('unable", "already installed before ''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] }", "installed before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given')", "= self.device.installed_apps() # check if app already installed alreadyInstalled = False if bundleId", "size < 40000000: # actually install from backend logger.info('installing app %s from backend", "execute the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy:", "Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return", "identifier') logger.debug('using account %s' % accountId) # case 3 only if not alreadyInstalled:", "try: size = int(app['fileSizeBytes']) except ValueError: size = -1 if size > 0", "without a corresponding backendJob backendJobData = {} 
if self.jobId: backendJobData = self.backend.get_job(self.jobId) ##", "str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ## set job finished if self.jobId:", "result = False ## set job finished if self.jobId: self.backend.post_job(backendJobData) return result class", "self.appId: # install from backend # dirty check for ipa-size < ~50MB if", "def execute(self): logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId, self.device)) backendJobData =", "base64 import time from enum import Enum from store import AppStore, AppStoreException from", "< ~50MB if app and 'fileSizeBytes' in app: size = 0 try: size", "store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get appInfo: %s ', e) raise", "unregistred storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get", "'.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read() f.close() try: appData", "str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state'] =", "# check for matching version number if version: installedVersion = installedApps[bundleId]['version'] if version", "u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict = jobDict if not '_id'", "(bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app", "self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e))", "device, jobDict): self.jobDict = jobDict if not '_id' in jobDict: raise JobExecutionError('No jobId", "self.appId) return False # case 2: install from backend elif self.appId: # install", "pilot could be used to access the /applications rest api appData = 
store.get_app_data(trackId)", "installedApps: logger.debug('app %s is already installed' % bundleId) # check for matching version", "if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId,", "archive app binary: %s' % str(e)) def _install_app(self, pilot): ''' try to install", "return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device,", "if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData)", "else: for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if", "/applications rest api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId", "jobDict): self.jobDict = jobDict if not '_id' in jobDict: raise JobExecutionError('No jobId present')", "backendJobData = {} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state']", "present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app", "failed\") raise JobExecutionError(\"App installation failed\") ## add app to backend ### the app", "archiving since device is running iOS 9 or later\") else: logger.debug(\"check if backend", "backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' %", "AppStore instead') else: logger.warning('unable to get app archive from backend. 
appId: <%s>' %", "self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\") class", "self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\" %", "Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd'])", "+ '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read() f.close() try:", "app was already installed before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise", "%s via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\")", "= Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod", "None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app binary", "backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self, bundleId):", "%s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId)", "= device self.backend = backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def", "= InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install app')", "install from backend logger.info('installing app %s from backend (size: %s)' % (bundleId,size)) if", "not alreadyInstalled: # install via appstore logger.info('installing app 
%s via appstore' % bundleId)", "app %s via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation", "ipa size)') # case 3: install from appstore # case 4: installed but", "app and 'fileSizeBytes' in app: size = 0 try: size = int(app['fileSizeBytes']) except", "execute(self): logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId, self.device)) # allow InstallAppJobs", "in jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device,", "if size > 0 or size < 40000000: # actually install from backend", "# save the results and install the app if not previously installed #", "# check if app already installed alreadyInstalled = False if bundleId in installedApps:", "if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed:", "def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False):", "self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable", "logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId", "not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from", "%s via cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type')", "logging import base64 import time from enum import Enum from store import AppStore,", "executionStrategy = jobInfo['executionStrategy'] 
logger.debug('post_run') ## add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING)", "and registered with backend if self.appId and alreadyInstalled: # app is installed and", "in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in", "in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore') store", "installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app binary", "if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath):", "+ bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read()", "= jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s'", "missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job = None if", "via device handler failed! 
- Install via AppStore instead') else: logger.warning('unable to get", "appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' % (bundleId,", "self.backend = backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend,", "% bundleId) # check for matching version number if version: installedVersion = installedApps[bundleId]['version']", "or size < 40000000: # actually install from backend logger.info('installing app %s from", "if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict):", "install from appstore # case 4: installed but unregistred storeCountry = 'de' if", "os.remove(appPath) tries = 3 while tries > 0 and bundleId not in self.device.installed_apps():", "self.jobId = jobDict['_id'] self.device = device self.backend = backend def execute(self): raise NotImplementedError", "device is running iOS 9 or later\") else: logger.debug(\"check if backend already has", "'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo", "None if 'type' in jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job", "if app and '_id' in app: self.appId = app['_id'] # case 1: already", "jobDict) self.appId = None def _install_app(self, pilot): ''' try to install the app", "iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo = {} try: trackId =", "if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ##", "= Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob,", "u'pending', u'running', u'finished', u'failed']) TYPE = 
Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device,", "False ## set job finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/'", "= jobDict['process'] if 'command' in jobDict: self.command = jobDict['command'] def execute(self): if self.process", "pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot) if not self.appId:", "os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read() f.close() try: appData = base64.b64encode(appData)", "% (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath)", "runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if", "class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE", "registred with backend logger.info('App is already installed and registred with backend <%s>' %", "JobExecutionError('no app type given') if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given')", "if accountId == '': raise JobExecutionError('unable to find a valid account identifier') logger.debug('using", "case 1: already installed and registered with backend if self.appId and alreadyInstalled: #", "InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install app') raise", "logger.info('skipping install from backend to avoid ideviceinstaller error (unknown ipa size)') # case", "self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot", "account accountId = '' if 
alreadyInstalled: # get account info from device installedAppInfo", "else: logger.debug(\"check if backend already has an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId)", "backend ### the app data is currently taken from ideviceinstaller (via device.installed_apps) ###", "appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb')", "failed! - Install via AppStore instead') else: logger.warning('unable to get app archive from", "self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device", "f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode", "import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined',", "via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise", "self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No", "installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong app version installed!') #", "appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get appInfo: %s ',", "to exist/run without a corresponding backendJob backendJobData = {} if self.jobId: backendJobData =", "and registred with backend logger.info('App is already installed and registred with backend <%s>'", "self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s' % (bundleId, app)) if app", "if executionStrategy: taskInfo['executionStrategy'] = executionStrategy 
pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving", "jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process = jobDict['process'] if", "previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId,", "app') logger.debug('app is installed now') self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId):", "given') bundleId = jobInfo['bundleId'] version = None if 'version' in jobInfo: version =", "from backend logger.info('installing app %s from backend (size: %s)' % (bundleId,size)) if not", "jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does", "in app: size = 0 try: size = int(app['fileSizeBytes']) except ValueError: size =", "device is running iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy", "appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) #", "dirty check for ipa-size < ~50MB if app and 'fileSizeBytes' in app: size", "app already installed alreadyInstalled = False if bundleId in installedApps: logger.debug('app %s is", "uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH +", "int(app['fileSizeBytes']) except ValueError: size = -1 if size > 0 or size <", "%s' % str(appInfo)) ## get account accountId = '' if alreadyInstalled: # get", "in jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version = None if", "= installedApps[bundleId]['version'] if 
version != installedVersion: raise JobExecutionError('wrong app version installed!') # the", "Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId: raise", "True try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo", "jobInfo = self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no app type given')", "backend logger.info('App is already installed and registred with backend <%s>' % self.appId) return", "from backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa'", "JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId,", "### the app data is currently taken from ideviceinstaller (via device.installed_apps) ### alternatively", "handler failed! - Install via AppStore instead') else: logger.warning('unable to get app archive", "else: if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for acc in self.device.accounts():", "case 3 only if not alreadyInstalled: # install via appstore logger.info('installing app %s", "device, jobDict) else: logger.error('jobDict does not contain a type!') if job: logger.info('job created:", "self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device is running iOS 9 or", "if 'process' in jobDict: self.process = jobDict['process'] if 'command' in jobDict: self.command =", "AppStoreException as e: logger.error('unable to get appInfo: %s ', e) raise JobExecutionError('unable to", "the app via device handler failed! 
- Install via AppStore instead') else: logger.warning('unable", "'': raise JobExecutionError('unable to find a valid account identifier') logger.debug('using account %s' %", "an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e:", "= installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for acc", "'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry']", "from disk except Exception, e: raise JobExecutionError('unable to archive app binary: %s' %", "> 8: logger.debug(\"skipping app archiving since device is running iOS 9 or later\")", "Job.STATE.FAILED result = False ## set job finished if self.jobId: self.backend.post_job(backendJobData) return result", "installed False if the app was already installed before ''' logger.debug('_install_app') if not", "installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong app version installed!') # the app", "## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution", "except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result", "execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend,", "''' try to install the app will raise a JobExecutionError on failure returns:", "installedVersion: raise JobExecutionError('wrong app version installed!') # the app is already installed and", "is installed and registred with backend logger.info('App is already installed and registred with", "_execute_app(self, pilot, bundleId, runId, 
executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo =", "app %s via device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while", "tries > 0 and bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if", "% (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch", "jobDict['process'] if 'command' in jobDict: self.command = jobDict['command'] def execute(self): if self.process and", "the app was already installed before ''' logger.debug('_installApp') installJobDict = { '_id': False,", "logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId,", "better version data installedApps = self.device.installed_apps() # check if app already installed alreadyInstalled", "return True else: logging.warning('installing the app via device handler failed! 
- Install via", "executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive", "JobExecutionError('unable to archive app binary: %s' % str(e)) def _install_app(self, pilot): ''' try", "self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try:", "= base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode app archive!') #delete", "bundleId) self.device.uninstall(bundleId) # # save the results and install the app if not", "accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ## add app to", "app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' %", "finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device,", "__init__(self, backend, device, jobDict): self.jobDict = jobDict if not '_id' in jobDict: raise", "= { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing", "backend to avoid ideviceinstaller error (unknown ipa size)') # case 3: install from", "install the app if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set", "% bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler' % bundleId)", "installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for acc in", "tries = 3 while tries > 0 and bundleId not in self.device.installed_apps(): 
tries", "super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process = jobDict['process'] if 'command'", "def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict:", "from backend. appId: <%s>' % self.appId) else: logger.info('skipping install from backend to avoid", "elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return", "to archive app binary: %s' % str(e)) def _install_app(self, pilot): ''' try to", "backendJobData['state'] = Job.STATE.FAILED result = False ## set job finished if self.jobId: self.backend.post_job(backendJobData)", "installedApps = self.device.installed_apps() # check if app already installed alreadyInstalled = False if", "device, jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try:", "on failure returns: True if the app was just installed False if the", "job = InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device,", "== storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to find", "appInfo: %s' % str(appInfo)) ## get account accountId = '' if alreadyInstalled: #", "else: logger.info('skipping install from backend to avoid ideviceinstaller error (unknown ipa size)') #", "% accountId) # case 3 only if not alreadyInstalled: # install via appstore", "if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId", "install from backend to avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping install", "jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version = None if 'version'", "as e: logger.error('unable to get appInfo: %s ', e) raise JobExecutionError('unable to get", "jobInfo: 
version = jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore", "= jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId", "- Install via AppStore instead') else: logger.warning('unable to get app archive from backend.", "add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot,", "or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in", "install via appstore logger.info('installing app %s via appstore' % bundleId) if not pilot.install_appstore(appInfo,", "app archive from backend. appId: <%s>' % self.appId) else: logger.info('skipping install from backend", "str(e)) def _install_app(self, pilot): ''' try to install the app will raise a", "f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to", "finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self, backend, device,", "existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s' %", "self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) #", "later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo:", "'version' in jobInfo: version = jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']:", "= RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict)", "from 
store import AppStore, AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class", "to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId,", "TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict = jobDict", "Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain a type!')", "bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob", "avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping install from backend to avoid", "installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo']", "not installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable to install app') logger.debug('app is", "% str(appInfo)) ## get account accountId = '' if alreadyInstalled: # get account", "False ## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job):", "the app was just installed False if the app was already installed before", "if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId) # use device", "pilot): ''' try to install the app will raise a JobExecutionError on failure", "= -1 if size > 0 or size < 40000000: # actually install", "if os.path.exists(appPath): f = open(appPath, 'rb') appData = f.read() f.close() try: appData =", "JobExecutionError('wrong app version installed!') # the app is already installed and versions are", "logger.error('unable to get appInfo: %s ', e) raise JobExecutionError('unable to get appInfo: AppStoreException')", "False # case 2: install from 
backend elif self.appId: # install from backend", "accountId == '': raise JobExecutionError('unable to find a valid account identifier') logger.debug('using account", "if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or", "rest api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId =", "% bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries > 0 and bundleId", "handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries > 0 and", "installation failed\") raise JobExecutionError(\"App installation failed\") ## add app to backend ### the", "to better version data installedApps = self.device.installed_apps() # check if app already installed", "backend for already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId", "self.backend.post_app(appData) # end install via appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']:", "= {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e:", "= installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ###", "store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get appInfo: %s", "'' if alreadyInstalled: # get account info from device installedAppInfo = self.device.installed_apps()[bundleId] if", "disk except Exception, e: raise JobExecutionError('unable to archive app binary: %s' % str(e))", "## get account accountId = '' if alreadyInstalled: # get account info from", "0 or size < 40000000: # actually install from backend logger.info('installing app %s", "appData = base64.b64encode(appData) 
self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode app archive!')", "already installed and registred with backend <%s>' % self.appId) return False # case", "the app was already installed before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict:", "to get appInfo: %s ', e) raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo']", "class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app',", "# self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except", "', e) raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo:", "if bundleId in self.device.installed_apps(): return True else: logging.warning('installing the app via device handler", "logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId, self.device)) # allow InstallAppJobs to", "check the backend for already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result", "in app: self.appId = app['_id'] # case 1: already installed and registered with", "installed and registred with backend logger.info('App is already installed and registred with backend", "bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device is", "alreadyInstalled: # install via appstore logger.info('installing app %s via appstore' % bundleId) if", "self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process = jobDict['process'] if 'command' in", "InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else:", "if not alreadyInstalled: # install 
via appstore logger.info('installing app %s via appstore' %", "### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' %", "''' execute the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if", "self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob,", "jobId present') self.jobId = jobDict['_id'] self.device = device self.backend = backend def execute(self):", "self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f =", "if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if", "app if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished", "except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData)", "is installed now') self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s'", "via appstore logger.info('installing app %s via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId,", "app['_id'] # case 1: already installed and registered with backend if self.appId and", "backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict):", "self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot) if not", "jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ###", "= tries-1 time.sleep(60) if bundleId in 
self.device.installed_apps(): return True else: logging.warning('installing the app", "JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job = None if 'type' in", "= self.backend.post_app(appData) # end install via appstore return not alreadyInstalled elif 'CydiaApp' ==", "{ 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self,", "app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job", "raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device = device self.backend = backend", "version number if version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong", "account identifier') logger.debug('using account %s' % accountId) # case 3 only if not", "jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend runId = self.backend.post_run(self.appId,", "Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot) if", "logger.debug('archiving %s' % bundleId) try: ### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH,", "8: logger.debug(\"skipping app archiving since device is running iOS 9 or later\") else:", "account %s' % accountId) # case 3 only if not alreadyInstalled: # install", "%s' % accountId) # case 3 only if not alreadyInstalled: # install via", "in jobInfo: version = jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing", "logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install app') raise 
JobExecutionError('Unable to install", "raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device,", "acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to", "pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId,", "store import AppStore, AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception):", "app: self.appId = app['_id'] # case 1: already installed and registered with backend", "u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict", "with backend logger.info('App is already installed and registred with backend <%s>' % self.appId)", "jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s'", "the pilot could be used to access the /applications rest api appData =", "size = -1 if size > 0 or size < 40000000: # actually", "%s on device %s\" % (self.jobId, self.device)) # allow InstallAppJobs to exist/run without", "True # check the backend for already existing app app = self.backend.get_app_bundleId(bundleId, version)", "(self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing", "appInfo: %s ', e) raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo", "True else: logging.warning('installing the app via device handler failed! - Install via AppStore", "archive from backend. 
appId: <%s>' % self.appId) else: logger.info('skipping install from backend to", "'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId =", "encode app archive!') #delete app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def", "in jobInfo: accountId = jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry'] ==", "are compatible alreadyInstalled = True # check the backend for already existing app", "jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict)", "already installed' % bundleId) # check for matching version number if version: installedVersion", "job = RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device,", "access the /applications rest api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] =", "%s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except Exception,", "class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job = None if 'type'", "version = None if 'version' in jobInfo: version = jobInfo['version'] #check app type", "app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath,", "None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run to", "raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device):", "self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId =", "to avoid ideviceinstaller error (unknown ipa size)') # case 3: install from appstore", "Exception, e: raise JobExecutionError('unable 
to archive app binary: %s' % str(e)) def _execute_app(self,", "raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise", "appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end install via appstore return not", "40000000: # actually install from backend logger.info('installing app %s from backend (size: %s)'", "the /applications rest api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName']", "jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job", "= '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' % bundleId) if", "to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa'", "# dirty check for ipa-size < ~50MB if app and 'fileSizeBytes' in app:", "import logging import base64 import time from enum import Enum from store import", "bundleId, runId, executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId,", "if alreadyInstalled: # get account info from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId'", "self.command) else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict,", "will raise a JobExecutionError on failure returns: True if the app was just", "jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job", "class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId", "in self.device.installed_apps(): return True else: logging.warning('installing the app via device handler failed! 
-", "%s: %s' % (bundleId, app)) if app and '_id' in app: self.appId =", "data is currently taken from ideviceinstaller (via device.installed_apps) ### alternatively the pilot could", "the backend for already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for", "logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ##", "True if the app was just installed False if the app was already", "%s ', e) raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using", "from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else:", "## set job finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def", "was already installed before ''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo']", "_archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app binary to backend", "app was already installed before ''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo':", "%s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device", "Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def", "app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s", "appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as", "e) raise JobExecutionError('unable to get appInfo: AppStoreException') 
self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s'", "installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict)", "is running iOS 9 or later\") else: logger.debug(\"check if backend already has an", "(via device.installed_apps) ### alternatively the pilot could be used to access the /applications", "Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending',", "bundleId) # check for matching version number if version: installedVersion = installedApps[bundleId]['version'] if", "self.appId = None def _install_app(self, pilot): ''' try to install the app returns:", "alreadyInstalled: # app is installed and registred with backend logger.info('App is already installed", "= store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get appInfo: %s ', e)", "backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def _install_app(self, pilot):", "try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode app", "uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job", "taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH,", "set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed:", "logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ## add app to backend ###", 
"with backend <%s>' % self.appId) return False # case 2: install from backend", "% str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute the app '''", "#delete app from disk except Exception, e: raise JobExecutionError('unable to archive app binary:", "= appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get account accountId = ''", "try: ### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa'", "### alternatively the pilot could be used to access the /applications rest api", "%s via device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries", "run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\"", "%s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] =", "Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED", "and '_id' in app: self.appId = app['_id'] # case 1: already installed and", "jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore') store =", "self.device = device self.backend = backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/'", "app)) if app and '_id' in app: self.appId = app['_id'] # case 1:", "JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job", "finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\" %", "size = int(app['fileSizeBytes']) except 
ValueError: size = -1 if size > 0 or", "except AppStoreException as e: logger.error('unable to get appInfo: %s ', e) raise JobExecutionError('unable", "% self.appId) else: logger.info('skipping install from backend to avoid ideviceinstaller error (ipa to", "tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return True else: logging.warning('installing the", "= store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end", "and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\")", "device): job = None if 'type' in jobDict: jobType = jobDict['type'] if jobType", "JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version = None if 'version' in jobInfo:", "installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable to", "accountId = '' if alreadyInstalled: # get account info from device installedAppInfo =", "self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no app type given') if not", "self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries", "self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s'", "backend <%s>' % self.appId) return False # case 2: install from backend elif", "avoid ideviceinstaller error (unknown ipa size)') # case 3: install from appstore #", "execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process", "## add app to backend ### the app data is currently taken from", "enum import Enum from store import 
AppStore, AppStoreException from pilot import Pilot logger", "ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain a type!') if job: logger.info('job", "version != installedVersion: raise JobExecutionError('wrong app version installed!') # the app is already", "appData['account'] = accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end install via", "time from enum import Enum from store import AppStore, AppStoreException from pilot import", "app binary: %s' % str(e)) def _install_app(self, pilot): ''' try to install the", "# app is installed and registred with backend logger.info('App is already installed and", "logger.debug(\"check if backend already has an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state']", "= jobInfo['bundleId'] version = None if 'version' in jobInfo: version = jobInfo['version'] #check", "= None def _install_app(self, pilot): ''' try to install the app returns: True", "'rb') appData = f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except", "bundleId in self.device.installed_apps(): return True else: logging.warning('installing the app via device handler failed!", "logger.debug('using account %s' % accountId) # case 3 only if not alreadyInstalled: #", "~50MB if app and 'fileSizeBytes' in app: size = 0 try: size =", "InstallAppJob %s on device %s\" % (self.jobId, self.device)) # allow InstallAppJobs to exist/run", "appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App", "set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone =", "InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, 
backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId =", "ValueError: size = -1 if size > 0 or size < 40000000: #", "else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend,", "self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f = open(appPath, 'rb') appData =", "#check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId)", "except ValueError: size = -1 if size > 0 or size < 40000000:", "pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId", "''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend,", "check for matching version number if version: installedVersion = installedApps[bundleId]['version'] if version !=", "backend. 
appId: <%s>' % self.appId) else: logger.info('skipping install from backend to avoid ideviceinstaller", "backend # dirty check for ipa-size < ~50MB if app and 'fileSizeBytes' in", "'fileSizeBytes' in app: size = 0 try: size = int(app['fileSizeBytes']) except ValueError: size", "save the results and install the app if not previously installed # self._save_run_results(runId,", "self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId)", "allow InstallAppJobs to exist/run without a corresponding backendJob backendJobData = {} if self.jobId:", "appId present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping", "already installed and registered with backend if self.appId and alreadyInstalled: # app is", "self.process = jobDict['process'] if 'command' in jobDict: self.command = jobDict['command'] def execute(self): if", "} if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True):", "appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia'", "jobDict: self.command = jobDict['command'] def execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url())", "app') raise JobExecutionError('Unable to install app') logger.debug('app is installed now') self.appId = installJob.appId", "returns: True if the app was just installed False if the app was", "jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving", "Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No", "running iOS 9 or later\") else: logger.debug(\"check if backend already 
has an app", "= Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\")", "= accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end install via appstore", "= '' if alreadyInstalled: # get account info from device installedAppInfo = self.device.installed_apps()[bundleId]", "type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId) # use", "corresponding backendJob backendJobData = {} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job", "# the app is already installed and versions are compatible alreadyInstalled = True", "executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add", "execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ## set", "installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable to install app') logger.debug('app is installed", "= app['_id'] # case 1: already installed and registered with backend if self.appId", "self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get account accountId =", "{} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable", "running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled", "self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot", "for bundleId %s: %s' % (bundleId, app)) if app and '_id' in app:", "storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore') store = 
AppStore(storeCountry)", "self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to", "installed but unregistred storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry']", "given') if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId']", "if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s", "<gh_stars>1-10 import os import logging import base64 import time from enum import Enum", "= f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable", "versions are compatible alreadyInstalled = True # check the backend for already existing", "acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to find a valid account identifier')", "pilot): ''' try to install the app returns: True if the app was", "if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend", "get appInfo: %s ', e) raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] =", "return False ## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class", "'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version = None", "bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries > 0 and bundleId not", "#delete app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob", "raise JobExecutionError('unable to find a valid account identifier') logger.debug('using account %s' % accountId)", 
"but unregistred storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ##", "used to access the /applications rest api appData = store.get_app_data(trackId) appData['account'] = accountId", "True else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s on device", "## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result =", "_install_app(self, pilot): ''' try to install the app will raise a JobExecutionError on", "backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy)", "valid account identifier') logger.debug('using account %s' % accountId) # case 3 only if", "# case 3: install from appstore # case 4: installed but unregistred storeCountry", "logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ##", "= False ## set job finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job):", "def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def", "self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if os.path.exists(appPath): f", "except TypeError: logger.error('Unable to encode app archive!') #delete app archive from disk os.remove(appPath)", "self.appId = app['_id'] # case 1: already installed and registered with backend if", "the app data is currently taken from ideviceinstaller (via device.installed_apps) ### alternatively the", "self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = 
Pilot(self.device.base_url()) result", "app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s' % (bundleId,", "logger.debug('post_run') ## add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot", "result = True try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId", "__init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process", "to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app", "== '': raise JobExecutionError('unable to find a valid account identifier') logger.debug('using account %s'", "else: logger.error('jobDict does not contain a type!') if job: logger.info('job created: %s' %", "if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP:", "appId: <%s>' % self.appId) else: logger.info('skipping install from backend to avoid ideviceinstaller error", "size = 0 try: size = int(app['fileSizeBytes']) except ValueError: size = -1 if", "in jobDict: self.command = jobDict['command'] def execute(self): if self.process and self.execute: pilot =", "(%s)\" % bundleId) self.device.uninstall(bundleId) # # save the results and install the app", "backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict):", "'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId) # use device data", "jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app %s' %", "raise JobExecutionError('unable to archive app binary: %s' % str(e)) def _execute_app(self, pilot, bundleId,", "(size: 
%s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH,", "not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy']", "(self.jobId, self.device)) # allow InstallAppJobs to exist/run without a corresponding backendJob backendJobData =", "os import logging import base64 import time from enum import Enum from store", "e: raise JobExecutionError('unable to archive app binary: %s' % str(e)) def _install_app(self, pilot):", "JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE =", "later\") else: logger.debug(\"check if backend already has an app ipa\") if not self.backend.has_app_archive(self.appId):", "alreadyInstalled = True # check the backend for already existing app app =", "exist/run without a corresponding backendJob backendJobData = {} if self.jobId: backendJobData = self.backend.get_job(self.jobId)", "already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s'", "installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else:", "str(appInfo)) ## get account accountId = '' if alreadyInstalled: # get account info", "= InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict)", "'process' in jobDict: self.process = jobDict['process'] if 'command' in jobDict: self.command = jobDict['command']", "try to install the app returns: True if the app was just installed", "since device is running iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId)", "Job.STATE.FINISHED self.backend.post_job(backendJobData) return True 
class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend,", "= self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId", "bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler' % bundleId) self.device.install(appPath)", "app returns: True if the app was just installed False if the app", "device data due to better version data installedApps = self.device.installed_apps() # check if", "= self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url())", "failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ## set job", "app archive!') #delete app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self):", "results and install the app if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone)", "alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId)", "# get account info from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo:", "if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version", "self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo:", "logger.debug('installing appstore app %s' % bundleId) # use device data due to better", "# # save the results and install the app if not previously installed", "appInfo from iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo = {} try:", "app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app 
%s' % bundleId) #", "to install the app returns: True if the app was just installed False", "installed' % bundleId) # check for matching version number if version: installedVersion =", "or later\") else: logger.debug(\"check if backend already has an app ipa\") if not", "raise JobExecutionError('no app type given') if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId", "data installedApps = self.device.installed_apps() # check if app already installed alreadyInstalled = False", "% (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath):", "version) logger.debug('backend result for bundleId %s: %s' % (bundleId, app)) if app and", "JobExecutionError('Unable to install app') logger.debug('app is installed now') self.appId = installJob.appId return installJob.appJustInstalled", "alreadyInstalled = False if bundleId in installedApps: logger.debug('app %s is already installed' %", "device %s\" % (self.jobId, self.device)) # allow InstallAppJobs to exist/run without a corresponding", "e: raise JobExecutionError('unable to archive app binary: %s' % str(e)) def _execute_app(self, pilot,", "account info from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId =", "backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not", "app data is currently taken from ideviceinstaller (via device.installed_apps) ### alternatively the pilot", "= self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s' % (bundleId, app)) if", "ideviceinstaller error (unknown ipa size)') # case 3: install from appstore # case", "self.command = jobDict['command'] def execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process,", "check if app 
already installed alreadyInstalled = False if bundleId in installedApps: logger.debug('app", "logger.debug('using appInfo: %s' % str(appInfo)) ## get account accountId = '' if alreadyInstalled:", "ideviceinstaller (via device.installed_apps) ### alternatively the pilot could be used to access the", "os.remove(appPath) #delete app from disk except Exception, e: raise JobExecutionError('unable to archive app", "% bundleId) try: ### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath", "jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif", "%s is already installed' % bundleId) # check for matching version number if", "running iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None", "Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend,", "backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state'] = Job.STATE.FINISHED", "set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self,", "'_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device = device", "due to better version data installedApps = self.device.installed_apps() # check if app already", "logger.info('skipping install from backend to avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping", "if not 'appType' in jobInfo: raise JobExecutionError('no app type given') if not 'bundleId'", "not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not", "appPath): logger.info('installing app %s via device handler' % bundleId) self.device.install(appPath) 
os.remove(appPath) tries =", "<%s>' % self.appId) else: logger.info('skipping install from backend to avoid ideviceinstaller error (ipa", "binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving", "= Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if not self.appId:", "via cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type') def", "logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save the results and install", "acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId ==", "3: install from appstore # case 4: installed but unregistred storeCountry = 'de'", "= {} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] =", "APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None", "if the app was already installed before ''' logger.debug('_install_app') if not 'jobInfo' in", "installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy)", "the app if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run", "accountId) # case 3 only if not alreadyInstalled: # install via appstore logger.info('installing", "to install app') logger.debug('app is installed now') self.appId = installJob.appId return installJob.appJustInstalled def", "def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, 
device, jobDict): super(InstallAppJob,", "archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on", "jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no app", "JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result =", "app: size = 0 try: size = int(app['fileSizeBytes']) except ValueError: size = -1", "pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE =", "self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError,", "%s' % bundleId) try: ### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True)", "not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId = jobInfo['bundleId'] version =", "install from backend elif self.appId: # install from backend # dirty check for", "_install_app(self, pilot): ''' try to install the app returns: True if the app", "for already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s:", "jobDict: self.process = jobDict['process'] if 'command' in jobDict: self.command = jobDict['command'] def execute(self):", "storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to find a", "bundleId in installedApps: logger.debug('app %s is already installed' % bundleId) # check for", "execute(self): logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId)", "AppStore(storeCountry) trackId = 0 appInfo = {} try: trackId = 
store.get_trackId_for_bundleId(bundleId) appInfo =", "storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo", "%s from backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath =", "install the app returns: True if the app was just installed False if", "backendJob backendJobData = {} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running", "STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def", "logger.warning('unable to get app archive from backend. appId: <%s>' % self.appId) else: logger.info('skipping", "pilot, bundleId, runId, executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo = {", "= int(app['fileSizeBytes']) except ValueError: size = -1 if size > 0 or size", "import AppStore, AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass", "%s\" % (self.jobId, self.device)) # allow InstallAppJobs to exist/run without a corresponding backendJob", "if the app was already installed before ''' logger.debug('_installApp') installJobDict = { '_id':", "to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo)) ##", "jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId =", "jobInfo: accountId = jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry:", "matching version number if version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise", "if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device is running iOS 9", "tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return 
True else: logging.warning('installing the app via", "pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls,", "os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend' %", "'appType' in jobInfo: raise JobExecutionError('no app type given') if not 'bundleId' in jobInfo:", "backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s", "jobInfo['bundleId'] version = None if 'version' in jobInfo: version = jobInfo['version'] #check app", "installed False if the app was already installed before ''' logger.debug('_installApp') installJobDict =", "device handler failed! - Install via AppStore instead') else: logger.warning('unable to get app", "'command' in jobDict: self.command = jobDict['command'] def execute(self): if self.process and self.execute: pilot", "= True try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\")", "= Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData)", "backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId + '.ipa' if", "try: self.appJustInstalled = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo =", "get account accountId = '' if alreadyInstalled: # get account info from device", "backend, device): job = None if 'type' in jobDict: jobType = jobDict['type'] if", "raise JobExecutionError(\"App installation failed\") ## add app to backend ### the app data", "JobExecutionError('unable to find a valid account identifier') logger.debug('using account %s' % 
accountId) #", "result for bundleId %s: %s' % (bundleId, app)) if app and '_id' in", "alternatively the pilot could be used to access the /applications rest api appData", "get account info from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId", "e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False", "import time from enum import Enum from store import AppStore, AppStoreException from pilot", "self.device.uninstall(bundleId) # # save the results and install the app if not previously", "u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict = jobDict if not '_id' in", "Enum from store import AppStore, AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__)", "size)') # case 3: install from appstore # case 4: installed but unregistred", "def __init__(self, backend, device, jobDict): self.jobDict = jobDict if not '_id' in jobDict:", "'_id' in app: self.appId = app['_id'] # case 1: already installed and registered", "'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType'", "binary: %s' % str(e)) def _install_app(self, pilot): ''' try to install the app", "install from backend # dirty check for ipa-size < ~50MB if app and", "4: installed but unregistred storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry =", "= backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device,", "pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId)", "logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid", "the app returns: True if the app was just 
installed False if the", "= 3 while tries > 0 and bundleId not in self.device.installed_apps(): tries =", "accountId = jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId", "find a valid account identifier') logger.debug('using account %s' % accountId) # case 3", "# install via appstore logger.info('installing app %s via appstore' % bundleId) if not", "= jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType", "jobDict if not '_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id']", "# end install via appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing", "device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state']", "not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return True", "def execute(self): logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId, self.device)) # allow", "from iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo = {} try: trackId", "super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def _install_app(self, pilot): ''' try to", "disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on device %s\"", "None if 'version' in jobInfo: version = jobInfo['version'] #check app type if 'AppStoreApp'", "jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app", "JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return", "self.device.uninstall(bundleId) def execute(self): 
logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId, self.device)) backendJobData", "case 2: install from backend elif self.appId: # install from backend # dirty", "!= installedVersion: raise JobExecutionError('wrong app version installed!') # the app is already installed", "end install via appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app", "command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job = None", "logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo = {}", "store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end install", "appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get account accountId = '' if", "False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not", "failure returns: True if the app was just installed False if the app", "RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif", "size > 0 or size < 40000000: # actually install from backend logger.info('installing", "taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to", "alreadyInstalled: # get account info from device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in", "runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save", "not contain a type!') if job: logger.info('job created: %s' % str(job)) return job", "installed now') self.appId = installJob.appId return installJob.appJustInstalled def 
_archive_app_binary(self, bundleId): logger.debug('archiving %s' %", "ideviceinstaller error (ipa to large)') else: logger.info('skipping install from backend to avoid ideviceinstaller", "elif self.appId: # install from backend # dirty check for ipa-size < ~50MB", "= Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] =", "if 'type' in jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job =", "9 or later\") else: logger.debug(\"check if backend already has an app ipa\") if", "# allow InstallAppJobs to exist/run without a corresponding backendJob backendJobData = {} if", "not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId, self.backend.RUN_STATE.FINISHED,", "AppStore, AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class", "% (self.jobId, self.device)) # allow InstallAppJobs to exist/run without a corresponding backendJob backendJobData", "a valid account identifier') logger.debug('using account %s' % accountId) # case 3 only", "= acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable to find a valid account", "from backend to avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping install from", "0 appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException", "<%s>' % self.appId) return False # case 2: install from backend elif self.appId:", "ipa-size < ~50MB if app and 'fileSizeBytes' in app: size = 0 try:", "apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH + bundleId +", "to access the /applications rest api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name']", "= True # check the 
backend for already existing app app = self.backend.get_app_bundleId(bundleId,", "= jobInfo['accountId'] else: for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId =", "import Enum from store import AppStore, AppStoreException from pilot import Pilot logger =", "backend elif self.appId: # install from backend # dirty check for ipa-size <", "% str(e)) def _install_app(self, pilot): ''' try to install the app will raise", "backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler' %", "uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on device %s\" % (self.jobId, self.device))", "logging.warning('installing the app via device handler failed! - Install via AppStore instead') else:", "= Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled = self._install_app(pilot)", "= AppStore(storeCountry) trackId = 0 appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo", "backend if self.appId and alreadyInstalled: # app is installed and registred with backend", "ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in", "self.device.installed_apps(): return True else: logging.warning('installing the app via device handler failed! 
- Install", "= Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict = jobDict if", "''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo given') jobInfo =", "== jobInfo['appType']: logger.debug('installing appstore app %s' % bundleId) # use device data due", "= jobDict['_id'] self.device = device self.backend = backend def execute(self): raise NotImplementedError class", "= '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('archiving app %s to %s' % (bundleId, appPath))", "set job finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self,", "api appData = store.get_app_data(trackId) appData['account'] = accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData)", "import os import logging import base64 import time from enum import Enum from", "job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try:", "device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def _install_app(self, pilot): '''", "logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device,", "except Exception, e: raise JobExecutionError('unable to archive app binary: %s' % str(e)) def", "archive!') #delete app archive from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing", "_save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath", "binary: %s' % str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute the", 
"self.appId) else: logger.info('skipping install from backend to avoid ideviceinstaller error (ipa to large)')", "jobInfo['bundleId'] if self.device.ios_version()[0] > 8: logger.debug(\"skipping app archiving since device is running iOS", "get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get", "class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process'", "logger.error('Unable to encode app archive!') #delete app archive from disk os.remove(appPath) if uninstallApp:", "else: logger.info('skipping install from backend to avoid ideviceinstaller error (ipa to large)') else:", "data due to better version data installedApps = self.device.installed_apps() # check if app", "app from disk except Exception, e: raise JobExecutionError('unable to archive app binary: %s'", "backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) result = True try: self.appJustInstalled =", "time.sleep(60) if bundleId in self.device.installed_apps(): return True else: logging.warning('installing the app via device", "False if bundleId in installedApps: logger.debug('app %s is already installed' % bundleId) #", "on device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running", "if 'version' in jobInfo: version = jobInfo['version'] #check app type if 'AppStoreApp' ==", "JobExecutionError('no jobInfo given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no", "self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command", "the app is already installed and versions are compatible alreadyInstalled = True #", "= { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: 
taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def", "%s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ## set job finished", "def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo", "% bundleId) # use device data due to better version data installedApps =", "if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy =", "= None if 'executionStrategy' in jobInfo: executionStrategy = jobInfo['executionStrategy'] logger.debug('post_run') ## add run", "via device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries >", "is running iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy =", "installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to install", "install the app will raise a JobExecutionError on failure returns: True if the", "appData = f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData) except TypeError:", "device installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if", "before ''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob =", "now') self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId)", "is already installed and registred with backend <%s>' % self.appId) return False #", "if the app was just installed False if the app was already installed", "info from device installedAppInfo = 
self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId']", "version = jobInfo['version'] #check app type if 'AppStoreApp' == jobInfo['appType']: logger.debug('installing appstore app", "type') def execute(self): logger.info(\"executing InstallAppJob %s on device %s\" % (self.jobId, self.device)) #", "False if the app was already installed before ''' logger.debug('_install_app') if not 'jobInfo'", "app type given') if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId", "= executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\")", "def _install_app(self, pilot): ''' try to install the app returns: True if the", "app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app", "return False # case 2: install from backend elif self.appId: # install from", "on device %s\" % (self.jobId, self.device)) # allow InstallAppJobs to exist/run without a", "logger.debug('Unable to install app') raise JobExecutionError('Unable to install app') logger.debug('app is installed now')", "if not '_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device", "to large)') else: logger.info('skipping install from backend to avoid ideviceinstaller error (unknown ipa", "%s on device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job", "to backend ### the app data is currently taken from ideviceinstaller (via device.installed_apps)", "'_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if", "or command missing\") class JobFactory(object): @classmethod def job_from_dict(cls, jobDict, backend, device): job =", "just installed False if the app 
was already installed before ''' logger.debug('_installApp') installJobDict", "# case 2: install from backend elif self.appId: # install from backend #", "executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save the", "and alreadyInstalled: # app is installed and registred with backend logger.info('App is already", "from ideviceinstaller (via device.installed_apps) ### alternatively the pilot could be used to access", "compatible alreadyInstalled = True # check the backend for already existing app app", "is already installed and versions are compatible alreadyInstalled = True # check the", "# install from backend # dirty check for ipa-size < ~50MB if app", "= ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain a type!') if job:", "open(appPath, 'rb') appData = f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive', appData)", "bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath = self.APP_ARCHIVE_PATH", "= 0 appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except", "app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId)", "> 0 or size < 40000000: # actually install from backend logger.info('installing app", "== Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job =", "run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId,", "self.device)) # allow InstallAppJobs to exist/run without a corresponding backendJob backendJobData = {}", "with backend if self.appId and alreadyInstalled: # app is installed and 
registred with", "(unknown ipa size)') # case 3: install from appstore # case 4: installed", "job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self, backend,", "if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ## set run finished self.backend.post_run(self.appId,", "job_from_dict(cls, jobDict, backend, device): job = None if 'type' in jobDict: jobType =", "logger.info('installing app %s from backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH)", "self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try:", "registred with backend <%s>' % self.appId) return False # case 2: install from", "# case 4: installed but unregistred storeCountry = 'de' if 'storeCountry' in jobInfo:", "# case 1: already installed and registered with backend if self.appId and alreadyInstalled:", "from backend # dirty check for ipa-size < ~50MB if app and 'fileSizeBytes'", "the app will raise a JobExecutionError on failure returns: True if the app", "f = open(appPath, 'rb') appData = f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId,", "job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot)", "0 try: size = int(app['fileSizeBytes']) except ValueError: size = -1 if size >", "accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for", "logger.info('installing app %s via device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3", "import base64 import time from enum import Enum from store import AppStore, AppStoreException", 
"Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict): self.jobDict = jobDict if not", "''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy", "backend logger.info('installing app %s from backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH):", "accountId appData['name'] = appData['trackName'] self.appId = self.backend.post_app(appData) # end install via appstore return", "installedAppInfo = self.device.installed_apps()[bundleId] if 'accountId' in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId'", "RunAppJob %s on device %s\" % (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set", "## get appInfo logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId = 0", "just installed False if the app was already installed before ''' logger.debug('_install_app') if", "was already installed before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no", "bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # #", "## set job finished backendJobData['state'] = Job.STATE.FINISHED self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def", "to get app archive from backend. 
appId: <%s>' % self.appId) else: logger.info('skipping install", "be used to access the /applications rest api appData = store.get_app_data(trackId) appData['account'] =", "logger.debug('backend result for bundleId %s: %s' % (bundleId, app)) if app and '_id'", "and registred with backend <%s>' % self.appId) return False # case 2: install", "self.backend.post_job(backendJobData) return True class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device,", "= jobInfo['executionStrategy'] logger.debug('post_run') ## add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting", "JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo))", "installed!') # the app is already installed and versions are compatible alreadyInstalled =", "e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result = False", "get appInfo logger.debug('fetch appInfo from iTunesStore') store = AppStore(storeCountry) trackId = 0 appInfo", "device.installed_apps) ### alternatively the pilot could be used to access the /applications rest", "case 4: installed but unregistred storeCountry = 'de' if 'storeCountry' in jobInfo: storeCountry", "self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling", "self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e))", "jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain", "raise a JobExecutionError on failure returns: True if the app was just installed", "execution') self._execute_app(pilot, 
bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId)", "has an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except JobExecutionError,", "to find a valid account identifier') logger.debug('using account %s' % accountId) # case", "actually install from backend logger.info('installing app %s from backend (size: %s)' % (bundleId,size))", "Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job = InstallAppJob(backend,", "pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s", "backend already has an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED", "version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong app version installed!')", "from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE", "jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId = None def _install_app(self, pilot): ''' try", "taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo)", "% (self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING", "# use device data due to better version data installedApps = self.device.installed_apps() #", "'type' in jobDict: jobType = jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend,", "if bundleId in installedApps: logger.debug('app %s is already installed' % 
bundleId) # check", "3 while tries > 0 and bundleId not in self.device.installed_apps(): tries = tries-1", "super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' %", "if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId']", "logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ##", "from enum import Enum from store import AppStore, AppStoreException from pilot import Pilot", "bundleId) try: ### add app binary to backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath =", "already installed before ''' logger.debug('_install_app') if not 'jobInfo' in self.jobDict: raise JobExecutionError('no jobInfo", "not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via cydia' % bundleId)", "job = None if 'type' in jobDict: jobType = jobDict['type'] if jobType ==", "% str(e)) backendJobData['state'] = Job.STATE.FAILED result = False ## set job finished if", "install app') logger.debug('app is installed now') self.appId = installJob.appId return installJob.appJustInstalled def _archive_app_binary(self,", "installed and versions are compatible alreadyInstalled = True # check the backend for", "logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed'])", "else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s on device %s\"", "def _install_app(self, pilot): ''' try to install the app will raise a JobExecutionError", "app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] =", "base64.b64encode(appData) 
self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode app archive!') #delete app", "add app to backend ### the app data is currently taken from ideviceinstaller", "logger.info('App is already installed and registred with backend <%s>' % self.appId) return False", "else: logger.warning('unable to get app archive from backend. appId: <%s>' % self.appId) else:", "self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app", "jobDict) if 'process' in jobDict: self.process = jobDict['process'] if 'command' in jobDict: self.command", "if 'accountId' in jobInfo: accountId = jobInfo['accountId'] else: for acc in self.device.accounts(): if", "if installDone: logger.info(\"uninstalling app (%s)\" % bundleId) self.device.uninstall(bundleId) # # save the results", "taken from ideviceinstaller (via device.installed_apps) ### alternatively the pilot could be used to", "taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ## add app to backend", "app via device handler failed! 
- Install via AppStore instead') else: logger.warning('unable to", "could be used to access the /applications rest api appData = store.get_app_data(trackId) appData['account']", "NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict)", "if backend already has an app ipa\") if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] =", "device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if 'process' in jobDict: self.process = jobDict['process']", "was just installed False if the app was already installed before ''' logger.debug('_installApp')", "backend to avoid ideviceinstaller error (ipa to large)') else: logger.info('skipping install from backend", "device handler' % bundleId) self.device.install(appPath) os.remove(appPath) tries = 3 while tries > 0", "raise JobExecutionError('unable to archive app binary: %s' % str(e)) def _install_app(self, pilot): '''", "use device data due to better version data installedApps = self.device.installed_apps() # check", "= open(appPath, 'rb') appData = f.read() f.close() try: appData = base64.b64encode(appData) self.backend.post_result(runId, 'app_archive',", "9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy'", "app is installed and registred with backend logger.info('App is already installed and registred", "= self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no app type given') if", "e: logger.error('unable to get appInfo: %s ', e) raise JobExecutionError('unable to get appInfo:", "def execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise", "-1 if size > 0 or size < 40000000: # actually install from", "try: 
installDone = self._install_app(pilot) if not self.appId: raise JobExecutionError(\"No appId present\") jobInfo =", "RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict) self.appId =", "case 3: install from appstore # case 4: installed but unregistred storeCountry =", "AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get account accountId", "(bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app from disk except Exception, e: raise", "cydia' % bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type') def execute(self):", "'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId,", "in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '':", "logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\"", "try to install the app will raise a JobExecutionError on failure returns: True", "3 only if not alreadyInstalled: # install via appstore logger.info('installing app %s via", "appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' % str(appInfo)) ## get account", "instead') else: logger.warning('unable to get app archive from backend. 
appId: <%s>' % self.appId)", "'storeCountry' in jobInfo: storeCountry = jobInfo['storeCountry'] ## get appInfo logger.debug('fetch appInfo from iTunesStore')", "== Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType == Job.TYPE.INSTALL_APP: job =", "to install app') raise JobExecutionError('Unable to install app') logger.debug('app is installed now') self.appId", "large)') else: logger.info('skipping install from backend to avoid ideviceinstaller error (unknown ipa size)')", "AppStoreException from pilot import Pilot logger = logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object):", "%s' % (bundleId, app)) if app and '_id' in app: self.appId = app['_id']", "runId, executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl,", "job finished if self.jobId: self.backend.post_job(backendJobData) return result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend,", "iOS 9 or later\") else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if", "logger.debug('archiving app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete app", "already installed and versions are compatible alreadyInstalled = True # check the backend", "in jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device = device self.backend", "'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy'] = executionStrategy pilot.run_auto_execution(bundleId, taskInfo=taskInfo) def _save_run_results(self, runId, bundleId,", "= Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self,", "} installJob = InstallAppJob(self.backend, self.device, installJobDict) 
logger.debug('executing InstallJob') if not installJob.execute(): logger.debug('Unable to", "u'running', u'finished', u'failed']) TYPE = Enum([u'run_app', u'install_app', u'exec_cmd']) def __init__(self, backend, device, jobDict):", "app archiving since device is running iOS 9 or later\") else: if not", "for acc in self.device.accounts(): if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId", "logger.debug(\"skipping app archiving since device is running iOS 9 or later\") else: logger.debug(\"check", "if version: installedVersion = installedApps[bundleId]['version'] if version != installedVersion: raise JobExecutionError('wrong app version", "and bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps():", "else: if not self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) executionStrategy = None if 'executionStrategy' in jobInfo: executionStrategy", "self.jobDict = jobDict if not '_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId", "(ipa to large)') else: logger.info('skipping install from backend to avoid ideviceinstaller error (unknown", "True class ExecuteCmdJob(Job): def __init__(self, backend, device, jobDict): super(ExecuteCmdJob, self).__init__(backend, device, jobDict) if", "device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict", "from backend to avoid ideviceinstaller error (unknown ipa size)') # case 3: install", "app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone: logger.info(\"uninstalling app (%s)\" %", "present') self.jobId = jobDict['_id'] self.device = device self.backend = backend def execute(self): raise", "= 0 try: size = int(app['fileSizeBytes']) except ValueError: size = -1 if size", "(bundleId, app)) if app and '_id' in app: self.appId = app['_id'] # case", "== jobInfo['appType']: 
logger.info('installing app %s via cydia' % bundleId) pilot.install_cydia(bundleId) return True else:", "= None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app", "running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot = Pilot(self.device.base_url()) try: installDone = self._install_app(pilot) if", "the app ''' logger.debug('_execute_app') taskInfo = { 'runId':runId, 'backendUrl':self.backend.baseUrl, } if executionStrategy: taskInfo['executionStrategy']", "via AppStore instead') else: logger.warning('unable to get app archive from backend. appId: <%s>'", "archive app binary: %s' % str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): '''", "device, jobDict) if 'process' in jobDict: self.process = jobDict['process'] if 'command' in jobDict:", "jobDict) else: logger.error('jobDict does not contain a type!') if job: logger.info('job created: %s'", "a JobExecutionError on failure returns: True if the app was just installed False", "> 0 and bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId", "in installedAppInfo: accountId = installedAppInfo['accountId'] else: if 'accountId' in jobInfo: accountId = jobInfo['accountId']", "installed before ''' logger.debug('_installApp') installJobDict = { '_id': False, 'jobInfo': self.jobDict['jobInfo'] } installJob", "app and '_id' in app: self.appId = app['_id'] # case 1: already installed", "installed and registered with backend if self.appId and alreadyInstalled: # app is installed", "try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to", "app version installed!') # the app is already installed and versions are compatible", "appstore logger.info('installing app %s via appstore' % bundleId) if not pilot.install_appstore(appInfo, accountId, 
taskInfo={'backendUrl':self.backend.baseUrl}):", "self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add", "iOS 9 or later\") else: logger.debug(\"check if backend already has an app ipa\")", "install via appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s", "% self.appId) return False # case 2: install from backend elif self.appId: #", "currently taken from ideviceinstaller (via device.installed_apps) ### alternatively the pilot could be used", "pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished', u'failed']) TYPE = Enum([u'run_app',", "version data installedApps = self.device.installed_apps() # check if app already installed alreadyInstalled =", "= jobDict['command'] def execute(self): if self.process and self.execute: pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command)", "# actually install from backend logger.info('installing app %s from backend (size: %s)' %", "not pilot.install_appstore(appInfo, accountId, taskInfo={'backendUrl':self.backend.baseUrl}): logger.error(\"App installation failed\") raise JobExecutionError(\"App installation failed\") ## add", "= logging.getLogger('worker.'+__name__) class JobExecutionError(Exception): pass class Job(object): STATE = Enum([u'undefined', u'pending', u'running', u'finished',", "%s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished", "self.backend.RUN_STATE.FINISHED, runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state']", "'jobInfo': self.jobDict['jobInfo'] } installJob = InstallAppJob(self.backend, self.device, installJobDict) logger.debug('executing InstallJob') if not installJob.execute():", "if app already installed alreadyInstalled = False if bundleId in installedApps: 
logger.debug('app %s", "if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler' % bundleId) self.device.install(appPath) os.remove(appPath)", "via appstore return not alreadyInstalled elif 'CydiaApp' == jobInfo['appType']: logger.info('installing app %s via", "from disk os.remove(appPath) if uninstallApp: self.device.uninstall(bundleId) def execute(self): logger.info(\"executing RunAppJob %s on device", "in jobInfo: raise JobExecutionError('no app type given') if not 'bundleId' in jobInfo: raise", "if not installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable to install app') logger.debug('app", "self).__init__(backend, device, jobDict) self.appId = None def _install_app(self, pilot): ''' try to install", "runId, bundleId, uninstallApp=True): logger.info(\"Saving apparchive to backend\") if self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=False): appPath =", "while tries > 0 and bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60)", "from appstore # case 4: installed but unregistred storeCountry = 'de' if 'storeCountry'", "error (ipa to large)') else: logger.info('skipping install from backend to avoid ideviceinstaller error", "self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] bundleId = jobInfo['bundleId'] if self.device.ios_version()[0]", "%s' % str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute the app", "= self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution') self._execute_app(pilot, bundleId, runId, executionStrategy) if installDone:", "app binary: %s' % str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute", "{} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING", "add app binary to 
backend self.device.archive(bundleId, self.APP_ARCHIVE_PATH, app_only=True) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH,", "given') jobInfo = self.jobDict['jobInfo'] if not 'appType' in jobInfo: raise JobExecutionError('no app type", "if app and 'fileSizeBytes' in app: size = 0 try: size = int(app['fileSizeBytes'])", "InstallJob') if not installJob.execute(): logger.debug('Unable to install app') raise JobExecutionError('Unable to install app')", "raise JobExecutionError('Unable to install app') logger.debug('app is installed now') self.appId = installJob.appId return", "%s' % str(e)) def _install_app(self, pilot): ''' try to install the app will", "backend, device, jobDict): self.jobDict = jobDict if not '_id' in jobDict: raise JobExecutionError('No", "since device is running iOS 9 or later\") else: logger.debug(\"check if backend already", "runId=runId, executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] =", "appData) except TypeError: logger.error('Unable to encode app archive!') #delete app archive from disk", "# check the backend for already existing app app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend", "not '_id' in jobDict: raise JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device =", "% bundleId) pilot.install_cydia(bundleId) return True else: raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing", "app to backend ### the app data is currently taken from ideviceinstaller (via", "installed alreadyInstalled = False if bundleId in installedApps: logger.debug('app %s is already installed'", "raise JobExecutionError('unable to get appInfo: AppStoreException') self.jobDict['appInfo'] = appInfo logger.debug('using appInfo: %s' %", "raise JobExecutionError('invalid app type') def execute(self): logger.info(\"executing InstallAppJob %s on device %s\" %", "already has an app ipa\") if not 
self.backend.has_app_archive(self.appId): self._archive_app_binary(bundleId) backendJobData['state'] = Job.STATE.FINISHED except", "bundleId) logger.debug('fetch app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app", "= False if bundleId in installedApps: logger.debug('app %s is already installed' % bundleId)", "app was just installed False if the app was already installed before '''", "jobDict['type'] if jobType == Job.TYPE.RUN_APP: job = RunAppJob(backend, device, jobDict) elif jobType ==", "does not contain a type!') if job: logger.info('job created: %s' % str(job)) return", "from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via device handler'", "app %s from backend (size: %s)' % (bundleId,size)) if not os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath", "type given') if not 'bundleId' in jobInfo: raise JobExecutionError('no bundleId given') bundleId =", "1: already installed and registered with backend if self.appId and alreadyInstalled: # app", "failed\") ## add app to backend ### the app data is currently taken", "bundleId): logger.debug('archiving %s' % bundleId) try: ### add app binary to backend self.device.archive(bundleId,", "== Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain a", "self.backend.post_result(runId, 'app_archive', appData) except TypeError: logger.error('Unable to encode app archive!') #delete app archive", "in jobDict: self.process = jobDict['process'] if 'command' in jobDict: self.command = jobDict['command'] def", "and install the app if not previously installed # self._save_run_results(runId, bundleId, uninstallApp=installDone) ##", "get app archive from backend. 
appId: <%s>' % self.appId) else: logger.info('skipping install from", "app = self.backend.get_app_bundleId(bundleId, version) logger.debug('backend result for bundleId %s: %s' % (bundleId, app))", "JobExecutionError('No jobId present') self.jobId = jobDict['_id'] self.device = device self.backend = backend def", "'app_archive', appData) except TypeError: logger.error('Unable to encode app archive!') #delete app archive from", "jobInfo: raise JobExecutionError('no app type given') if not 'bundleId' in jobInfo: raise JobExecutionError('no", "app %s from backend' % bundleId) if self.backend.get_app_archive(self.appId, appPath): logger.info('installing app %s via", "bundleId not in self.device.installed_apps(): tries = tries-1 time.sleep(60) if bundleId in self.device.installed_apps(): return", "to archive app binary: %s' % str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None):", "already installed alreadyInstalled = False if bundleId in installedApps: logger.debug('app %s is already", "Job.TYPE.INSTALL_APP: job = InstallAppJob(backend, device, jobDict) elif jobType == Job.TYPE.EXEC_CMD: job = ExecuteCmdJob(backend,", "if 'command' in jobDict: self.command = jobDict['command'] def execute(self): if self.process and self.execute:", "bundleId) logger.debug('archiving app %s to %s' % (bundleId, appPath)) self.backend.post_app_archive(self.appId, appPath) os.remove(appPath) #delete", "os.path.exists(self.APP_ARCHIVE_PATH): os.makedirs(self.APP_ARCHIVE_PATH) appPath = '%s%s.ipa' % (self.APP_ARCHIVE_PATH, bundleId) logger.debug('fetch app %s from backend'", "self.device.install(appPath) os.remove(appPath) tries = 3 while tries > 0 and bundleId not in", "and versions are compatible alreadyInstalled = True # check the backend for already", "str(e)) def _execute_app(self, pilot, bundleId, runId, executionStrategy=None): ''' execute the app ''' logger.debug('_execute_app')", "result class RunAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def 
__init__(self, backend, device, jobDict): super(RunAppJob, self).__init__(backend, device, jobDict)", "2: install from backend elif self.appId: # install from backend # dirty check", "a corresponding backendJob backendJobData = {} if self.jobId: backendJobData = self.backend.get_job(self.jobId) ## set", "JobExecutionError('unable to archive app binary: %s' % str(e)) def _execute_app(self, pilot, bundleId, runId,", "= appData['trackName'] self.appId = self.backend.post_app(appData) # end install via appstore return not alreadyInstalled", "trackId = 0 appInfo = {} try: trackId = store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId)", "= store.get_trackId_for_bundleId(bundleId) appInfo = store.get_app_info(trackId) except AppStoreException as e: logger.error('unable to get appInfo:", "def _archive_app_binary(self, bundleId): logger.debug('archiving %s' % bundleId) try: ### add app binary to", "(self.jobId, self.device)) backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData)", "if self.appId and alreadyInstalled: # app is installed and registred with backend logger.info('App", "executionStrategy=executionStrategy) except JobExecutionError, e: logger.error(\"Job execution failed: %s\" % str(e)) backendJobData['state'] = Job.STATE.FAILED", "APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self, backend, device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None", "JobExecutionError(\"App installation failed\") ## add app to backend ### the app data is", "device, jobDict): super(InstallAppJob, self).__init__(backend, device, jobDict) self.appId = None def _archive_app_binary(self, bundleId): logger.debug('archiving", "and 'fileSizeBytes' in app: size = 0 try: size = int(app['fileSizeBytes']) except ValueError:", "not self.appId: raise JobExecutionError(\"No appId present\") jobInfo = self.jobDict['jobInfo'] 
bundleId = jobInfo['bundleId'] if", "backendJobData = self.backend.get_job(self.jobId) ## set job running backendJobData['state'] = Job.STATE.RUNNING self.backend.post_job(backendJobData) pilot =", "device self.backend = backend def execute(self): raise NotImplementedError class InstallAppJob(Job): APP_ARCHIVE_PATH='/tmp/apparchive/' def __init__(self,", "raise JobExecutionError('wrong app version installed!') # the app is already installed and versions", "% str(e)) backendJobData['state'] = Job.STATE.FAILED self.backend.post_job(backendJobData) return False ## set job finished backendJobData['state']", "pilot = Pilot(self.device.base_url()) pilot.inject(self.process, self.command) else: raise JobExecutionError(\"Process or command missing\") class JobFactory(object):", "if acc['storeCountry'] == storeCountry: accountId = acc['uniqueIdentifier'] if accountId == '': raise JobExecutionError('unable", "## add run to backend runId = self.backend.post_run(self.appId, self.backend.RUN_STATE.RUNNING) logger.info('starting app pilot execution')", "job = ExecuteCmdJob(backend, device, jobDict) else: logger.error('jobDict does not contain a type!') if" ]
[ "'iteration, precision, states, file, seed, resource, throughput' print header iteration = 0 for", "== 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly',", "out_string += ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy'])", "states_list: for seed in seed_list: out_string = str(iteration) out_string += ', ' +", "# <NAME> import sys import argparse import subprocess import re from math import", "0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc',", "precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' +", "'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters',", "synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates',", "= subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput:", "', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n')", "= [] # for i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) #", "debug_file = open('debug.txt', 'w') header = 'iteration, precision, states, file, seed, resource, throughput'", "for rsrc in rsrc_list: for precision in precision_list: for states in states_list: for", "str(iteration) out_string += ', ' + 
str(precision) out_string += ', ' + str(states)", "rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0',", "rsrc in rsrc_list: for precision in precision_list: for states in states_list: for seed", "def main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args =", "+ '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0 if __name__ == '__main__': sys.exit(main())", "<NAME> import sys import argparse import subprocess import re from math import log,", "file_list = ['59', '128', '354', '404'] rates = 4 precision_list = ['double'] states_list", "= ['59', '128', '354', '404'] rates = 4 precision_list = ['double'] states_list =", "parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list = ['59', '128', '354', '404']", "'0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc ==", "sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded',", "== 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out =", "throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError:", "'--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*) M partials/second') debug_file =", "'.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0 if __name__ == '__main__':", "str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) 
throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc',", "= argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list =", "(.*) M partials/second') debug_file = open('debug.txt', 'w') header = 'iteration, precision, states, file,", "<filename>benchmarks/v3-app-note/run_benchmarks_pll_empirical.py #!/usr/bin/env python2.7 # <NAME> import sys import argparse import subprocess import re", "= [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps',", "'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd)", "synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double':", "parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list", "total: (.*) M partials/second') debug_file = open('debug.txt', 'w') header = 'iteration, precision, states,", "# samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main(): parser = argparse.ArgumentParser(description='generate synthetictest", "str(precision) out_string += ', ' + str(states) out_string += ', ' + str(file)", "precision_list = ['double'] states_list = [4] # site_samples = 40 # sites_min =", "throughput' print header iteration = 0 for file in file_list: for rsrc in", "'128', '354', '404'] 
rates = 4 precision_list = ['double'] states_list = [4] #", "'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming',", "= 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded':", "'--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*) M partials/second')", "open('debug.txt', 'w') header = 'iteration, precision, states, file, seed, resource, throughput' print header", "'--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll':", "return samples_list def main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest')", "for seed in seed_list: out_string = str(iteration) out_string += ', ' + str(precision)", "', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file", "log_range=(log(max) - log(min)) # samples_list = [] # for i in range(0, samples):", "states_list = [4] # site_samples = 40 # sites_min = 100 # sites_max", "#!/usr/bin/env python2.7 # <NAME> import sys import argparse import subprocess import re from", "'w') header = 'iteration, precision, states, file, seed, resource, throughput' print header iteration", "print header iteration = 0 for file in file_list: for rsrc in rsrc_list:", "['59', '128', '354', '404'] rates = 4 precision_list = ['double'] states_list = [4]", "iteration = 0 for file in file_list: for rsrc in rsrc_list: for precision", "out_string += ', ' + str(states) out_string += ', ' + str(file) out_string", "args = parser.parse_args() file_list = ['59', '128', '354', '404'] rates = 4 precision_list", "# return samples_list def main(): parser = 
argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to", "# for i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list", "= str(iteration) out_string += ', ' + str(precision) out_string += ', ' +", "== 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision", "in rsrc_list: for precision in precision_list: for states in states_list: for seed in", "debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0", "rsrc_list: for precision in precision_list: for states in states_list: for seed in seed_list:", "'--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu':", "argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list = ['59',", "throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc ==", "rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if", "in states_list: for seed in seed_list: out_string = str(iteration) out_string += ', '", "range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main(): parser =", "'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder'])", "'pll-repeats': 
synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif", "= 40 # sites_min = 100 # sites_max = 1000000 # sites_list =", "sys import argparse import subprocess import re from math import log, exp #", "= 'iteration, precision, states, file, seed, resource, throughput' print header iteration = 0", "'--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc ==", "'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput", "' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' +", "argparse import subprocess import re from math import log, exp # def gen_log_site_list(min,", "synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc", "'.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if", "file_list: for rsrc in rsrc_list: for precision in precision_list: for states in states_list:", "for i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def", "rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0',", "100 # sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list =", "site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list", "+= ', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += 
',", "= 0 for file in file_list: for rsrc in rsrc_list: for precision in", "seed in seed_list: out_string = str(iteration) out_string += ', ' + str(precision) out_string", "if throughput: out_string += ', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR')", "from math import log, exp # def gen_log_site_list(min, max, samples): # log_range=(log(max) -", "= open('debug.txt', 'w') header = 'iteration, precision, states, file, seed, resource, throughput' print", "+ str(states) out_string += ', ' + str(file) out_string += ', ' +", "+ str(file) out_string += ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file", "' + str(states) out_string += ', ' + str(file) out_string += ', '", "if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', '", "import log, exp # def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) #", "'0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc", "'--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0',", "10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re", "'--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats':", "= 10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree']", "= 100 # sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list", "partials/second') debug_file = open('debug.txt', 'w') header = 'iteration, precision, states, 
file, seed, resource,", "= ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*)", "== 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly',", "+ throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd)", "synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if", "seed, resource, throughput' print header iteration = 0 for file in file_list: for", "parser.parse_args() file_list = ['59', '128', '354', '404'] rates = 4 precision_list = ['double']", "'--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*) M partials/second') debug_file = open('debug.txt',", "rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc',", "str(file) out_string += ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file +", "'1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string +=", "site_samples = 40 # sites_min = 100 # sites_max = 1000000 # sites_list", "header = 'iteration, precision, states, file, seed, resource, throughput' print header iteration =", "synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += 
',", "resource, throughput' print header iteration = 0 for file in file_list: for rsrc", "import argparse import subprocess import re from math import log, exp # def", "synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif", "[args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps),", "to synthetictest') args = parser.parse_args() file_list = ['59', '128', '354', '404'] rates =", "+ str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree'])", "benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list = ['59', '128', '354',", "sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu']", "reps = 10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot',", "['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*) M", "states in states_list: for seed in seed_list: out_string = str(iteration) out_string += ',", "try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput = throughput_re.findall(synthetictest_out)", "'--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total: (.*) M partials/second') debug_file", "help='path to synthetictest') args = parser.parse_args() file_list = ['59', '128', '354', '404'] rates", "4 precision_list = ['double'] states_list = [4] # site_samples = 40 # sites_min", 
"'1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision'])", "40 # sites_min = 100 # sites_max = 1000000 # sites_list = gen_log_site_list(sites_min,", "'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args =", "elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try:", "out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out)", "debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0 if __name__ ==", "elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif", "# sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu',", "header iteration = 0 for file in file_list: for rsrc in rsrc_list: for", "# sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu',", "+ '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0", "str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder'])", "'--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', 
'--pllonly', '--pllrepeats', '--postorder']) elif rsrc ==", "- log(min)) # samples_list = [] # for i in range(0, samples): #", "log_range/(samples-1)*i)))) # return samples_list def main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path", "i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main():", "== 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder'])", "= gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps", "synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc", "synthetictest') args = parser.parse_args() file_list = ['59', '128', '354', '404'] rates = 4", "throughput total: (.*) M partials/second') debug_file = open('debug.txt', 'w') header = 'iteration, precision,", "', ' + str(file) out_string += ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path]", "+= ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree',", "re.compile('tree throughput total: (.*) M partials/second') debug_file = open('debug.txt', 'w') header = 'iteration,", "str(seed)]) throughput_re_index = 0 if rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc", "'404'] rates = 4 precision_list = ['double'] states_list = [4] # site_samples =", "+ '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed',", "if rsrc == 'cpu': 
synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0',", "+= ', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string +", "# site_samples = 40 # sites_min = 100 # sites_max = 1000000 #", "subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration +=", "str(states) out_string += ', ' + str(file) out_string += ', ' + str(seed)", "for file in file_list: for rsrc in rsrc_list: for precision in precision_list: for", "in file_list: for rsrc in rsrc_list: for precision in precision_list: for states in", "math import log, exp # def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min))", "+= ', ' + str(precision) out_string += ', ' + str(states) out_string +=", "'354', '404'] rates = 4 precision_list = ['double'] states_list = [4] # site_samples", "out_string += ', ' + str(precision) out_string += ', ' + str(states) out_string", "debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return", "samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main(): parser = argparse.ArgumentParser(description='generate", "seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re =", "synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) 
synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)])", "max, samples): # log_range=(log(max) - log(min)) # samples_list = [] # for i", "file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)])", "= [4] # site_samples = 40 # sites_min = 100 # sites_max =", "'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args = ['--randomtree', '--stdrand',", "range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput", "out_string = str(iteration) out_string += ', ' + str(precision) out_string += ', '", "+ str(precision) out_string += ', ' + str(states) out_string += ', ' +", "synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args() file_list = ['59', '128',", "synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif", "synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc',", "'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision ==", "'--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc", "samples): # log_range=(log(max) - log(min)) # samples_list = [] # for i in", "rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 
'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out", "synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc == 'cpu':", "0 for file in file_list: for rsrc in rsrc_list: for precision in precision_list:", "sites_min = 100 # sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples)", "# log_range=(log(max) - log(min)) # samples_list = [] # for i in range(0,", "', ' + str(states) out_string += ', ' + str(file) out_string += ',", "import re from math import log, exp # def gen_log_site_list(min, max, samples): #", "'--newtree'] throughput_re = re.compile('tree throughput total: (.*) M partials/second') debug_file = open('debug.txt', 'w')", "out_string += ', ' + str(file) out_string += ', ' + str(seed) synthetictest_cmd", "= range(1,11) extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree", "+ rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' + throughput[throughput_re_index]", "for states in states_list: for seed in seed_list: out_string = str(iteration) out_string +=", "1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats',", "+= ', ' + str(file) out_string += ', ' + str(seed) synthetictest_cmd =", "rsrc == 'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads',", "elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc',", "' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') 
debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write('", "'\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0 if __name__", "python2.7 # <NAME> import sys import argparse import subprocess import re from math", "'.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)])", "= throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' + throughput[throughput_re_index] print out_string except", "states, file, seed, resource, throughput' print header iteration = 0 for file in", "samples_list def main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args", "file, seed, resource, throughput' print header iteration = 0 for file in file_list:", "+= ', ' + str(states) out_string += ', ' + str(file) out_string +=", "extra_args = ['--randomtree', '--stdrand', '--fulltiming', '--newparameters', '--reroot', '--newtree'] throughput_re = re.compile('tree throughput total:", "'--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc',", "subprocess import re from math import log, exp # def gen_log_site_list(min, max, samples):", "print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n')", "samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main(): parser = 
argparse.ArgumentParser(description='generate synthetictest benchmarks')", "elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu':", "throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) +", "', ' + str(precision) out_string += ', ' + str(states) out_string += ',", "out_string += ', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string +=", "synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index", "samples_list = [] # for i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i))))", "rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list =", "= 4 precision_list = ['double'] states_list = [4] # site_samples = 40 #", "= re.compile('tree throughput total: (.*) M partials/second') debug_file = open('debug.txt', 'w') header =", "['double'] states_list = [4] # site_samples = 40 # sites_min = 100 #", "' + str(file) out_string += ', ' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna',", "# samples_list = [] # for i in range(0, samples): # samples_list.append(int(round(exp(log(min) +", "[4] # site_samples = 40 # sites_min = 100 # sites_max = 1000000", "log, exp # def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) # samples_list", "== 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1'])", 
"gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps =", "log(min)) # samples_list = [] # for i in range(0, samples): # samples_list.append(int(round(exp(log(min)", "sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10", "= parser.parse_args() file_list = ['59', '128', '354', '404'] rates = 4 precision_list =", "file + '.tree']) synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index =", "str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc ==", "# def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) # samples_list = []", "' + str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file +", "elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc == 'pll': synthetictest_cmd.extend(['--rsrc',", "== 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc", "# sites_min = 100 # sites_max = 1000000 # sites_list = gen_log_site_list(sites_min, sites_max,", "elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args)", "', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += ', '", "'0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder']) 
elif", "out_string += ', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string", "seed_list: out_string = str(iteration) out_string += ', ' + str(precision) out_string += ',", "= ['double'] states_list = [4] # site_samples = 40 # sites_min = 100", "gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) # samples_list = [] # for", "in seed_list: out_string = str(iteration) out_string += ', ' + str(precision) out_string +=", "synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput =", "M partials/second') debug_file = open('debug.txt', 'w') header = 'iteration, precision, states, file, seed,", "precision in precision_list: for states in states_list: for seed in seed_list: out_string =", "in precision_list: for states in states_list: for seed in seed_list: out_string = str(iteration)", "def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) # samples_list = [] #", "synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc']) synthetictest_cmd.extend(extra_args) if precision == 'double': synthetictest_cmd.extend(['--doubleprecision']) try: synthetictest_out = subprocess.check_output(synthetictest_cmd) out_string", "exp # def gen_log_site_list(min, max, samples): # log_range=(log(max) - log(min)) # samples_list =", "'pll': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats',", "+ log_range/(samples-1)*i)))) # return samples_list def main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path',", "precision_list: for states in states_list: for seed in seed_list: out_string = 
str(iteration) out_string", "rates = 4 precision_list = ['double'] states_list = [4] # site_samples = 40", "for precision in precision_list: for states in states_list: for seed in seed_list: out_string", "'1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu': synthetictest_cmd.extend(['--rsrc', '1,2,3,4','--multirsrc'])", "throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' + throughput[throughput_re_index] print out_string", "re from math import log, exp # def gen_log_site_list(min, max, samples): # log_range=(log(max)", "debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1", "= 1000000 # sites_list = gen_log_site_list(sites_min, sites_max, site_samples) rsrc_list = ['cpu', 'cpu-threaded', 'pll',", "throughput_re = re.compile('tree throughput total: (.*) M partials/second') debug_file = open('debug.txt', 'w') header", "'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc == 'quadruple-gpu':", "' + str(precision) out_string += ', ' + str(states) out_string += ', '", "['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args", "throughput: out_string += ', ' + throughput[throughput_re_index] print out_string except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n')", "== 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc ==", "+ '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration += 1 return 0 
if", "= ['cpu', 'cpu-threaded', 'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11)", "file in file_list: for rsrc in rsrc_list: for precision in precision_list: for states", "subprocess.check_output(synthetictest_cmd) out_string += ', ' + rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string", "except subprocess.CalledProcessError: debug_file.write('ERROR') debug_file.write('===============================================================\\n') debug_file.write(out_string + '\\n') debug_file.write(' '.join(synthetictest_cmd) + '\\n') debug_file.write(synthetictest_out) iteration", "import sys import argparse import subprocess import re from math import log, exp", "main(): parser = argparse.ArgumentParser(description='generate synthetictest benchmarks') parser.add_argument('synthetictest_path', help='path to synthetictest') args = parser.parse_args()", "precision, states, file, seed, resource, throughput' print header iteration = 0 for file", "import subprocess import re from math import log, exp # def gen_log_site_list(min, max,", "synthetictest_cmd.extend(['--states', str(states)]) synthetictest_cmd.extend(['--reps', str(reps), '--rates', str(rates)]) synthetictest_cmd.extend(['--seed', str(seed)]) throughput_re_index = 0 if rsrc", "'cpu': synthetictest_cmd.extend(['--rsrc', '0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif", "rsrc throughput = throughput_re.findall(synthetictest_out) if throughput: out_string += ', ' + throughput[throughput_re_index] print", "synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--postorder']) elif rsrc == 'pll-repeats': synthetictest_cmd.extend(['--rsrc', '0', '--pllonly', '--pllrepeats', '--postorder'])", "'0', '--postorder']) elif rsrc == 'cpu-threaded': synthetictest_cmd.extend(['--rsrc', '0', '--enablethreads', '--postorder']) elif rsrc ==", "rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', 
'1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc']) elif rsrc", "[] # for i in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return", "in range(0, samples): # samples_list.append(int(round(exp(log(min) + log_range/(samples-1)*i)))) # return samples_list def main(): parser", "str(seed) synthetictest_cmd = [args.synthetictest_path] synthetictest_cmd.extend(['--alignmentdna', file + '.phy']) synthetictest_cmd.extend(['--tree', file + '.tree']) synthetictest_cmd.extend(['--states',", "'pll', 'pll-repeats', 'gpu', 'dual-gpu'] reps = 10 seed_list = range(1,11) extra_args = ['--randomtree',", "'--postorder']) elif rsrc == 'gpu': synthetictest_cmd.extend(['--rsrc', '1']) elif rsrc == 'dual-gpu': synthetictest_cmd.extend(['--rsrc', '1,2','--multirsrc'])" ]
[ "scan_result = check_output(\"yara {} {} --print-meta --print-strings {} {}\".format(variables, recursive, signature_path, file_path), shell=True,", "CalledProcessError as e: logging.error(\"There seems to be an error in the rule file:\\n{}\".format(e.output.decode()))", "error in the rule file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode()) except Exception as", "= split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules = rule_regex.findall(output)", "string :return: dict ''' variables = convert_external_variables(external_variables) recursive = '-r' if recursive else", "import json import logging from .common import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False):", "= rule_regex.findall(output) assert len(match_blocks) == len(rules) return match_blocks, rules def _append_match_to_result(match, resulting_matches, rule):", "match_blocks, rules def _append_match_to_result(match, resulting_matches, rule): assert len(rule) == 4 rule_name, meta_string, _,", "json import logging from .common import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False): '''", "file_path), shell=True, stderr=STDOUT) except CalledProcessError as e: logging.error(\"There seems to be an error", "= match meta_dict = _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if rule_name in resulting_matches else", "to signature file :type signature_path: string :param file_path: files to scan :type file_path:", "parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches = dict()", "matched_string.encode())) resulting_matches[rule_name] = this_match def _parse_meta_data(meta_data_string): ''' Will be of form 
'item0=lowercaseboolean0,item1=\"value1\",item2=value2,..' '''", "in item: key, value = item.split('=', maxsplit=1) value = json.loads(value) if value in", "_, _ = rule assert len(match) == 4 _, offset, matched_tag, matched_string =", "_split_output_in_rules_and_matches(output) matches_regex = re.compile(r'((0x[a-f0-9]*):(\\S+):\\s(.+))+') for index, rule in enumerate(rules): for match in matches_regex.findall(match_blocks[index]):", "as e: logging.error(\"There seems to be an error in the rule file:\\n{}\".format(e.output.decode())) return", "--print-meta --print-strings {} {}\".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError as e:", "signature_path: string :param file_path: files to scan :type file_path: string :return: dict '''", "offset, matched_tag, matched_string = match meta_dict = _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if rule_name", "matches_regex.findall(match_blocks[index]): _append_match_to_result(match, resulting_matches, rule) return resulting_matches def _split_output_in_rules_and_matches(output): split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks =", "file_path, external_variables={}, recursive=False): ''' Scan files and return matches :param signature_path: path to", "be an error in the rule file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode()) except", "rule assert len(match) == 4 _, offset, matched_tag, matched_string = match meta_dict =", "sys import re import json import logging from .common import convert_external_variables def scan(signature_path,", "match_blocks: match_blocks.remove('') rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules = rule_regex.findall(output) assert len(match_blocks) == len(rules) return", "== len(rules) return match_blocks, rules def _append_match_to_result(match, resulting_matches, rule): assert len(rule) == 4", 
"resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict) this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode())) resulting_matches[rule_name] = this_match", "split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex =", "match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules =", "if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict) this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode()))", "re import json import logging from .common import convert_external_variables def scan(signature_path, file_path, external_variables={},", "= _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(),", "rule file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode()) except Exception as e: logging.error('Could not", "16), matched_tag, matched_string.encode())) resulting_matches[rule_name] = this_match def _parse_meta_data(meta_data_string): ''' Will be of form", "{} def _parse_yara_output(output): resulting_matches = dict() match_blocks, rules = _split_output_in_rules_and_matches(output) matches_regex = re.compile(r'((0x[a-f0-9]*):(\\S+):\\s(.+))+')", "len(match_blocks) == len(rules) return match_blocks, rules def _append_match_to_result(match, resulting_matches, rule): assert len(rule) ==", "meta_data = dict() for item in meta_data_string.split(','): if '=' in item: key, value", "= dict() for item in meta_data_string.split(','): if '=' in item: key, value =", "file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode()) except 
Exception as e: logging.error('Could not parse", "enumerate(rules): for match in matches_regex.findall(match_blocks[index]): _append_match_to_result(match, resulting_matches, rule) return resulting_matches def _split_output_in_rules_and_matches(output): split_regex", "this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode())) resulting_matches[rule_name] = this_match def _parse_meta_data(meta_data_string): ''' Will be of", "= re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)')", "recursive else '' try: scan_result = check_output(\"yara {} {} --print-meta --print-strings {} {}\".format(variables,", "4 rule_name, meta_string, _, _ = rule assert len(match) == 4 _, offset,", "{}\".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError as e: logging.error(\"There seems to", "import re import json import logging from .common import convert_external_variables def scan(signature_path, file_path,", "if value in ['true', 'false'] else value.strip('\\\"') meta_data[key] = value else: logging.warning('Malformed meta", "rules = rule_regex.findall(output) assert len(match_blocks) == len(rules) return match_blocks, rules def _append_match_to_result(match, resulting_matches,", "convert_external_variables(external_variables) recursive = '-r' if recursive else '' try: scan_result = check_output(\"yara {}", "from subprocess import check_output, CalledProcessError, STDOUT import sys import re import json import", "re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules = rule_regex.findall(output) assert len(match_blocks) == len(rules) return match_blocks, rules def _append_match_to_result(match,", "for item in meta_data_string.split(','): if '=' in item: key, value = item.split('=', maxsplit=1)", "= item.split('=', maxsplit=1) 
def scan(signature_path, file_path, external_variables=None, recursive=False):
    '''
    Scan files and return matches

    :param signature_path: path to signature file
    :type signature_path: string
    :param file_path: files to scan
    :type file_path: string
    :param external_variables: external yara variables passed to the command line
    :type external_variables: dict
    :param recursive: scan file_path recursively
    :type recursive: bool
    :return: dict of matches keyed by rule name (empty dict on any error)
    '''
    # Use None as default instead of a mutable {} shared across calls.
    external_variables = external_variables if external_variables is not None else {}
    variables = convert_external_variables(external_variables)
    recursive_flag = '-r' if recursive else ''
    try:
        # FIXME(security): shell=True with interpolated paths allows shell
        # injection if signature_path or file_path come from untrusted input.
        scan_result = check_output(
            "yara {} {} --print-meta --print-strings {} {}".format(
                variables, recursive_flag, signature_path, file_path
            ),
            shell=True, stderr=STDOUT
        )
    except CalledProcessError as e:
        logging.error("There seems to be an error in the rule file:\n{}".format(e.output.decode()))
        return {}
    try:
        return _parse_yara_output(scan_result.decode())
    except Exception as e:
        # Best-effort: parsing failures are logged, not propagated.
        logging.error('Could not parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e))
        return {}
def _parse_yara_output(output):
    '''
    Turn raw yara stdout into a dict of match descriptions keyed by rule name.
    '''
    matches_by_rule = dict()
    match_blocks, rules = _split_output_in_rules_and_matches(output)
    match_pattern = re.compile(r'((0x[a-f0-9]*):(\S+):\s(.+))+')
    # match_blocks and rules are guaranteed equal length by the splitter.
    for block, rule in zip(match_blocks, rules):
        for single_match in match_pattern.findall(block):
            _append_match_to_result(single_match, matches_by_rule, rule)
    return matches_by_rule
''' meta_data = dict()", "= convert_external_variables(external_variables) recursive = '-r' if recursive else '' try: scan_result = check_output(\"yara", "= '-r' if recursive else '' try: scan_result = check_output(\"yara {} {} --print-meta", "_append_match_to_result(match, resulting_matches, rule) return resulting_matches def _split_output_in_rules_and_matches(output): split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output)", "import sys import re import json import logging from .common import convert_external_variables def", "not parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches =", "check_output, CalledProcessError, STDOUT import sys import re import json import logging from .common", "file_path: files to scan :type file_path: string :return: dict ''' variables = convert_external_variables(external_variables)", "rule_name, meta_string, _, _ = rule assert len(match) == 4 _, offset, matched_tag,", "path to signature file :type signature_path: string :param file_path: files to scan :type", "e: logging.error(\"There seems to be an error in the rule file:\\n{}\".format(e.output.decode())) return {}", "matched_tag, matched_string.encode())) resulting_matches[rule_name] = this_match def _parse_meta_data(meta_data_string): ''' Will be of form 'item0=lowercaseboolean0,item1=\"value1\",item2=value2,..'", "variables = convert_external_variables(external_variables) recursive = '-r' if recursive else '' try: scan_result =", "{} {} --print-meta --print-strings {} {}\".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError", "item in meta_data_string.split(','): if '=' in item: key, value = item.split('=', maxsplit=1) value", "re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex = 
re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules", "try: scan_result = check_output(\"yara {} {} --print-meta --print-strings {} {}\".format(variables, recursive, signature_path, file_path),", "yara result: {} {}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches = dict() match_blocks,", "dict ''' variables = convert_external_variables(external_variables) recursive = '-r' if recursive else '' try:", "for index, rule in enumerate(rules): for match in matches_regex.findall(match_blocks[index]): _append_match_to_result(match, resulting_matches, rule) return", "_parse_yara_output(output): resulting_matches = dict() match_blocks, rules = _split_output_in_rules_and_matches(output) matches_regex = re.compile(r'((0x[a-f0-9]*):(\\S+):\\s(.+))+') for index,", "import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False): ''' Scan files and return matches", "result: {} {}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches = dict() match_blocks, rules", "''' Will be of form 'item0=lowercaseboolean0,item1=\"value1\",item2=value2,..' 
def _append_match_to_result(match, resulting_matches, rule):
    '''
    Fold one regex match tuple into the result dict, creating the rule entry
    on first sight of the rule.
    '''
    assert len(rule) == 4
    rule_name, meta_string, _, _ = rule
    assert len(match) == 4
    _, offset, matched_tag, matched_string = match
    # setdefault returns the existing entry or installs a fresh one.
    entry = resulting_matches.setdefault(
        rule_name,
        dict(rule=rule_name, matches=True, strings=list(), meta=_parse_meta_data(meta_string))
    )
    entry['strings'].append((int(offset, 16), matched_tag, matched_string.encode()))
''' meta_data = dict() for item in meta_data_string.split(','): if '=' in", "json.loads(value) if value in ['true', 'false'] else value.strip('\\\"') meta_data[key] = value else: logging.warning('Malformed", "''' variables = convert_external_variables(external_variables) recursive = '-r' if recursive else '' try: scan_result", "Exception as e: logging.error('Could not parse yara result: {} {}'.format(sys.exc_info()[0].__name__, e)) return {}", "match meta_dict = _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name,", "matched_tag, matched_string = match meta_dict = _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if rule_name in", "this_match = resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict) this_match['strings'].append((int(offset,", "the rule file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode()) except Exception as e: logging.error('Could", "resulting_matches[rule_name] if rule_name in resulting_matches else dict(rule=rule_name, matches=True, strings=list(), meta=meta_dict) this_match['strings'].append((int(offset, 16), matched_tag,", "assert len(rule) == 4 rule_name, meta_string, _, _ = rule assert len(match) ==", "maxsplit=1) value = json.loads(value) if value in ['true', 'false'] else value.strip('\\\"') meta_data[key] =", "string :param file_path: files to scan :type file_path: string :return: dict ''' variables", "in ['true', 'false'] else value.strip('\\\"') meta_data[key] = value else: logging.warning('Malformed meta string \\'{}\\''.format(meta_data_string))", "try: return _parse_yara_output(scan_result.decode()) except Exception as e: logging.error('Could not parse yara result: {}", "meta=meta_dict) this_match['strings'].append((int(offset, 16), matched_tag, matched_string.encode())) resulting_matches[rule_name] = 
this_match def _parse_meta_data(meta_data_string): ''' Will be", ".common import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False): ''' Scan files and return", ":type file_path: string :return: dict ''' variables = convert_external_variables(external_variables) recursive = '-r' if", "signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError as e: logging.error(\"There seems to be an", "{}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches = dict() match_blocks, rules = _split_output_in_rules_and_matches(output)", "resulting_matches def _split_output_in_rules_and_matches(output): split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks:", "signature_path: path to signature file :type signature_path: string :param file_path: files to scan", "rule_regex.findall(output) assert len(match_blocks) == len(rules) return match_blocks, rules def _append_match_to_result(match, resulting_matches, rule): assert", "== 4 rule_name, meta_string, _, _ = rule assert len(match) == 4 _,", "import check_output, CalledProcessError, STDOUT import sys import re import json import logging from", "return {} def _parse_yara_output(output): resulting_matches = dict() match_blocks, rules = _split_output_in_rules_and_matches(output) matches_regex =", "of form 'item0=lowercaseboolean0,item1=\"value1\",item2=value2,..' 
''' meta_data = dict() for item in meta_data_string.split(','): if '='", ":return: dict ''' variables = convert_external_variables(external_variables) recursive = '-r' if recursive else ''", "in meta_data_string.split(','): if '=' in item: key, value = item.split('=', maxsplit=1) value =", "_split_output_in_rules_and_matches(output): split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('') rule_regex", "value = json.loads(value) if value in ['true', 'false'] else value.strip('\\\"') meta_data[key] = value", "{} {}'.format(sys.exc_info()[0].__name__, e)) return {} def _parse_yara_output(output): resulting_matches = dict() match_blocks, rules =", "if recursive else '' try: scan_result = check_output(\"yara {} {} --print-meta --print-strings {}", "rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules = rule_regex.findall(output) assert len(match_blocks) == len(rules) return match_blocks, rules", "import logging from .common import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False): ''' Scan", "_ = rule assert len(match) == 4 _, offset, matched_tag, matched_string = match", "_, offset, matched_tag, matched_string = match meta_dict = _parse_meta_data(meta_string) this_match = resulting_matches[rule_name] if", "def _split_output_in_rules_and_matches(output): split_regex = re.compile(r'\\n*.*\\[.*\\]\\s\\/.+\\n*') match_blocks = split_regex.split(output) while '' in match_blocks: match_blocks.remove('')", "value = item.split('=', maxsplit=1) value = json.loads(value) if value in ['true', 'false'] else", "'item0=lowercaseboolean0,item1=\"value1\",item2=value2,..' 
''' meta_data = dict() for item in meta_data_string.split(','): if '=' in item:", "in match_blocks: match_blocks.remove('') rule_regex = re.compile(r'(.*)\\s\\[(.*)\\]\\s([\\.\\.\\/]|[\\/]|[\\.\\/])(.+)') rules = rule_regex.findall(output) assert len(match_blocks) == len(rules)", "to be an error in the rule file:\\n{}\".format(e.output.decode())) return {} try: return _parse_yara_output(scan_result.decode())", "item.split('=', maxsplit=1) value = json.loads(value) if value in ['true', 'false'] else value.strip('\\\"') meta_data[key]", "{} --print-meta --print-strings {} {}\".format(variables, recursive, signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError as", "resulting_matches, rule): assert len(rule) == 4 rule_name, meta_string, _, _ = rule assert", "logging from .common import convert_external_variables def scan(signature_path, file_path, external_variables={}, recursive=False): ''' Scan files", "recursive, signature_path, file_path), shell=True, stderr=STDOUT) except CalledProcessError as e: logging.error(\"There seems to be", "def scan(signature_path, file_path, external_variables={}, recursive=False): ''' Scan files and return matches :param signature_path:" ]
[ "which the API URL for should be generated from. Returns ------- url :", "submission_id if submission_id is not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status", "not have the full functionality of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py.", "In (key, value, type) form. # Detailed is also their useage cases per", "job ID from the image upload results. It may be the case that", "API. This also determines the submission ID and the job ID for the", "status. results[\"status\"] = status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] =", "provide a valid session.\" ) else: # The session should be fine. session_key", "It is obtained from the fits corr file that is downloaded into a", "submission_id: str = None) -> dict: \"\"\"Get the results of a submission specified", "self.__job_id = job_id else: raise error.ReadOnlyError( \"The job ID has already been set", "str, job_id: str = None ) -> None: \"\"\"Downloads fits data table files", "the job status. status = job_result.get(\"status\") finally: return status # Should not get", "self.session = None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session = session_key #", "= image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission", "None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id = None return None __doc_job_id", "self.__job_id = None return None __doc_job_id = ( \"When file upload or table", "JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to send this", "above. 
\"\"\" job_id = job_id if job_id is not None else self.job_id #", "def get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get the status of a", "else upload_filename + \"_wcs\" ) # The full path of the filename derived", "file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header from the file.", "not ready.\" ) # Download the file. library.http.download_file_from_url( url=file_download_url, filename=filename, overwrite=True ) return", "in queue. status = None else: # Check the job status. status =", "just extract it from the header file using Astropy. Parameters ---------- job_id :", "dict The arguments which can be used to send the request. \"\"\" args", "None def __set_job_id(self, job_id) -> None: \"\"\"Assign the job ID, it should only", "session: raise error.WebRequestError( \"The provided API key did not provide a valid session.\"", "raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the submission id. This allows", "self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id) -> None: \"\"\"Assign the job", "it matches the upload specifications. Returns ------- results : dictionary The results of", "upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper to allow for the uploading", "if file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided file type to be", "None ) -> dict: \"\"\"A wrapper function for sending a webrequest to the", "for what is going on. if self.submission_id is None: raise error.WebRequestError( \"There cannot", "in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex:", "not None else self.job_id # Get the result of the job. service_string =", "the defaults with user provided arguments where desired. 
Parameters ---------- **kwargs : dict", "job_id = job_id if job_id is not None else self.job_id # Get the", "allow for the uploading of files or images to the API. This also", "of a job sent to the API service. Parameters ---------- job_id : str,", "The world coordinate solution class for the image provided. \"\"\" job_id = job_id", "status. status = job_result.get(\"status\") finally: return status # Should not get here. raise", "The type of file to be downloaded from astrometry.net. It should one of", "int), (\"x\", None, list), (\"y\", None, list), (\"album\", None, str), ] def __init__(self,", "Arguments which would override the defaults. Returns ------- args : dict The arguments", "is returned.) - Status : The status of the job. - Calibration :", "a submission specified by its ID. Parameters ---------- submission_id : str The ID", "URL links are derived from. This should be used if the API is", "successful. status = job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError( \"The job", "\"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error", "dict: \"\"\"Get the results of a job sent to the API service. Parameters", "# Construct the URL for the request. It is a little different from", "return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check that", "API key to log in a derive a session key. self.session = None", "= None The ID of the submission. If it is not passed, the", "executed. This is helpful for debugging or similar processes. Returns ------- None \"\"\"", "submission_id is not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\")", "dict The results of the API call to upload the image. \"\"\" #", "the image provided. 
\"\"\" job_id = job_id if job_id is not None else", "error.WebRequestError: # Make a more helpful error message for what is going on.", "= self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get", "The base url which all other API URL links are derived from. This", "= None ) -> None: \"\"\"Downloads fits data table files which correspond to", "urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the request. request = urllib.request.Request(url=api_url, headers=headers,", "return None __doc_job_id = ( \"When file upload or table upload is sent", "result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str = None) -> str:", "locations. \"\"\" job_id = job_id if job_id is not None else self.job_id #", "still in queue. if len(job_id_list) == 0: self.__job_id = None else: self.__job_id =", "# The logic should not flow beyond this point. raise error.LogicFlowError return None", "requires that the data format must be a JSON based datatype. json_data =", "to obtain the desired service URL. Parameters ---------- service : str The service", "= \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id)", "= library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file after loading it", "filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header from the file. wcs_header =", "\"axy\", \"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided file type", "default arguments for uploading files. In (key, value, type) form. # Detailed is", "session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission ID from the image upload", "for astrometry.net. 
This API does not have the full functionality of the default", "is likely still in queue. if len(job_id_list) == 0: self.__job_id = None else:", "return results def get_job_status(self, job_id: str = None) -> str: \"\"\"Get the status", "yet, None is returned instead. \"\"\" job_id = job_id if job_id is not", "__doc_submission_id = ( \"When file upload or table upload is sent to the", "submission. If it is not passed, the ID determined by the file upload", "ID and the job ID for the uploaded image and saves it. Parameters", "in of the location of stars detected in the provided image. - `corr`:", "job is still in queue. status = None else: # Check the job", "None def __login(self, apikey: str) -> str: \"\"\"The method to log into the", "positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None, list), (\"y\", None, list),", "which all other API URL links are derived from. This should be used", "------- url : str The URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL", "in the # correct format. Namely, a multipart/form-data format. if file_args is not", "provided API key did not provide a valid session.\" ) else: # The", "Load the data from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 )", "still be in the temporary directory. delete_after : bool, default = True Delete", "It may be the case that there is not job yet associated with", "for the uploaded image and saves it. Parameters ---------- pathname : str The", "str The type of file to be downloaded from astrometry.net. It should one", "The URL which to send this request to, constructed from the service #", "is used. Returns ------- None \"\"\" # Get the proper job ID. job_id", "arguments which can be used to send the request. \"\"\" args = {}", "log in. original_upload_filename : string The original filename that was used to upload", "processes. Returns ------- None \"\"\" # Defining the URL. 
self.ASTROMETRY_BASE_API_URL = ( str(url)", "API key for the web API service. Returns ------- session_key : string The", "Parameters ---------- service : string The service which is being requested. The web", "the job ID from the image upload results. It may be the case", "'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" + \"--\" + boundary", "string The status of the submission. If the job has not run yet,", ": string, default = None The filename that the downloaded correlation file will", "send the request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. try:", "valid type which can\" \" be downloaded, it must be one of: {fty}\".format(", "scheme so a new method is made. def _construct_file_download_url(ftype: str, id: str) ->", "message=error_message ) ) else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request", "This is helpful for debugging or similar processes. Returns ------- None \"\"\" #", "args: dict = {}, file_args: dict = None ) -> dict: \"\"\"A wrapper", "str, default = None The ID of the job that the results should", "known, helps the # processing a little. (\"scale_units\", None, str), (\"scale_type\", None, str),", "For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine", "is a little different from the # normal API scheme so a new", "a self-hosted install or has a different web source than nova.astrometry.net. Defaults to", "# Obtain the session key derived when this class is instantiated and #", "\"\"\"Get the results of a submission specified by its ID. Parameters ---------- submission_id", "service URL. Parameters ---------- service : str The service which the API URL", "degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None, float), # Image properties,", "happened is unknown. 
raise error.UndiscoveredError(\"Why the web request failed is unknown.\") else: job_id_list", "from the service # desired. api_url = self._generate_service_url(service=service) # If the request requires", "if job_id is None: raise error.WebRequestError(\"There is no job to download the file", "to # call the API again. if self.__job_id is not None: return self.__job_id", "nearby. - `axy`: A table in of the location of stars detected in", "int), (\"crpix_center\", None, bool), (\"invert\", None, bool), # These parameters are needed if", ") -> hint.Table: \"\"\"This obtains the table that correlates the location of reference", "string The session key for this login session. \"\"\" # The key. args", "be taken to sure it matches the upload specifications. Returns ------- results :", "url : str The URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL +", "except error.WebRequestError: # This error is likely because the job is still in", "else _DEFAULT_BASE_URL ) # Use the API key to log in a derive", "that the downloaded correlation file will be downloaded as. The path is going", "as well. Returns ------- results : dict The results of the API call", "output cannot be properly processed. This is likely\" \" from a bad web", "downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the", "on.\" ) else: # What happened is unknown. raise error.UndiscoveredError(\"Why the web request", "the API to get the job ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id )", "ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a more", "= \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre =", "objects in the image, people inputted. - Machine Tags : Ditto for tags,", "webrequest to the astrometry.net API service. 
Returns the results as well. Parameters ----------", "wcs : Astropy WCS The world coordinate solution class for the image provided.", "the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header from", "the API, the job ID of the\" \" submission is saved here.\" )", "str: \"\"\"Construct the file curl from the file type `ftype` and the job", "= None, silent: bool = True) -> None: \"\"\"The instantiation, connecting to the", "us, we just extract it from the header file using Astropy. Parameters ----------", "+ service return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for", "image field. - Annotations : Known objects in the field, with annotations. -", "queue. return None # Check that the service was successful. status = job_result.get(\"status\",", "(\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None, float), # These parameters allows", "API URL links are derived from. This should be used if the API", "the image uploaded. - Tags : Known tagged objects in the image, people", "-> None: \"\"\"The instantiation, connecting to the web API using the API key.", "__get_submission_id(self) -> str: \"\"\"Extract the submission ID from the image upload results.\"\"\" image_results", "delete_after: bool = True ) -> hint.Table: \"\"\"This obtains the table that correlates", "+ \"\\n\" + \"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version:", "for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex)", "submission ID and the job ID for the uploaded image and saves it.", "table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename is", "{} # For the status. results[\"status\"] = status # For the calibrations. 
service_string", "# Extract the submission id. This allows for easier # association between this", "library import opihiexarata.library.error as error import opihiexarata.library.hint as hint # The base URL", "---------- job_id : str, default = None The ID of the job that", "is no job to download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise", "file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data from the file.", "0: self.__job_id = None else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return", "key for requests. if self.session is not None: args.update({\"session\": self.session}) # The API", "= self._generate_service_url(service=service) # If the request requires that a file be send, then", "are needed if being sent instead is an x,y list of # source", "nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey : string The API key", "if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary", "class for the image provided. \"\"\" job_id = job_id if job_id is not", "str: \"\"\"Generate the correct URL for the desired service. Because astrometry.net uses a", "likely because the job is still in queue. return None # Check that", "the uploaded image and saves it. Parameters ---------- pathname : str The pathname", "file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided file type to be downloaded", "= None try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\":", "+ boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data;", "str() self._image_return_results = {} return None def __login(self, apikey: str) -> str: \"\"\"The", "astrometry.net \"\"\" # The default arguments for uploading files. 
In (key, value, type)", "__del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the job ID from the", "then it must be in the # correct format. Namely, a multipart/form-data format.", "the filename derived from saving it in a temporary # directory. corr_filename =", "\" service.\" ) return None def __del_submission_id(self) -> None: \"\"\"Remove the current submission", "None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool), # These parameters are needed", "change. # They must be reset because of their read-only nature. del self.submission_id,", "job_id=job_id ) # Load the data from the file. __, correlation_table = library.fits.read_fits_table_file(", "exist: {path}\".format(path=pathname)) # Extract the submission id. This allows for easier # association", "error is likely because the job is still in queue. return None #", "world coordinate solution class for the image provided. \"\"\" job_id = job_id if", "# Finally send the request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the", "\"The provided file type to be downloaded is not a valid type which", "submission. If the job has not run yet, None is returned instead. \"\"\"", "is still processing and thus the data files\" \" are not ready.\" )", "None The base url which all other API URL links are derived from.", "download link is not giving an acceptable http status code.\" \" It is", "into. Use this session key for requests. if self.session is not None: args.update({\"session\":", "} data_pre = str( \"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" +", "None return None __doc_job_id = ( \"When file upload or table upload is", "original filename that was used to upload the data. 
session : string The", "it from the API service.\" ) return None def __del_job_id(self) -> None: \"\"\"Remove", "str), ] def __init__(self, url=None, apikey: str = None, silent: bool = True)", "bool = True) -> None: \"\"\"The instantiation, connecting to the web API using", "\"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\" + boundary +", "= result.get(\"session\", False) # Check if the session works and that the API", "for sending a request. This constructs the needed arguments, replacing the defaults with", "this string. args : dictionary, default = {} The arguments being sent over", "bool, default = True Delete the file after downloading it to extract its", "all other service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A", "import astropy.wcs as ap_wcs import opihiexarata.library as library import opihiexarata.library.error as error import", "the submission and job IDs will change. # They must be reset because", "boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\";", "= None) -> str: \"\"\"Get the status of a submission specified by its", "Namely, a multipart/form-data format. if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for", "\"\"\"A wrapper to allow for the uploading of files or images to the", "------- None \"\"\" # Get the proper job ID. job_id = job_id if", "__del_submission_id(self) -> None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id = None return", "after downloading it to extract its information. Returns ------- wcs : Astropy WCS", "care must be taken to sure it matches the upload specifications. Returns -------", "job_result.get(\"status\") finally: return status # Should not get here. 
raise error.LogicFlowError return None", "The path is going to still be in the temporary directory. delete_after :", "must be reset because of their read-only nature. del self.submission_id, self.job_id # Save", "Known objects in the field, with annotations. - Info : A collection of", "point. raise error.LogicFlowError return None def get_job_results(self, job_id: str = None) -> dict:", "by obtaining it from the API\" \" service.\" ) return None def __del_submission_id(self)", "bool, default = True Should there be printed messages as the processes are", "around the web API for astrometry.net. This API does not have the full", "results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id)", "service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str =", "True ) -> hint.Table: \"\"\"This obtains the table that correlates the location of", "the image is obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id else: raise", "job_id = job_id if job_id is not None else self.job_id # Ensure that", "data format must be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The", "is no reason to # call the API again. if self.__job_id is not", "if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download link is not giving", "None, int), (\"x\", None, list), (\"y\", None, list), (\"album\", None, str), ] def", "it to extract its information. Returns ------- wcs : Astropy WCS The world", "file_type : str The type of file to be downloaded from astrometry.net. 
It", "to the API, the submission ID is\" \" saved here.\" ) __submission_id =", "requires that a file be send, then it must be in the #", "\"\"\"This obtains the wcs header file and then computes World Coordinate System solution", "should one of the following: - `wcs`: The world corrdinate data table file.", "logged into. Use this session key for requests. if self.session is not None:", "_DEFAULT_URL_ARGUMENTS = [ # These parameters are for licensing and distribution terms. (\"allow_commercial_use\",", "can pull # from the API service. Accommodating for capitalization. file_type = str(file_type).lower()", ") # Load the header from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs =", "+ \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\"", "key did not provide a valid session.\" ) else: # The session should", "in the field, with annotations. - Info : A collection of most everything", "is not None else self.job_id # Get the result of the job. service_string", "+ \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" +", "it if desired. if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str", "= file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the status of the request", "str The pathname of the file to open. The filename is extracted and", "provided is a valid status. status = result.get(\"status\") if status == \"error\": error_message", "string The original filename that was used to upload the data. session :", "stars location in the sky and in pixel space. job_id : str, default", "extracted and used as well. Returns ------- results : dict The results of", "only via machine inputs. - Objects in field : Known objects in the", "visibility by the general public. 
(\"publicly_visible\", \"y\", str), # Image scaling parameters, if", "key for this login session. \"\"\" # The key. args = {\"apikey\": apikey}", "there be printed messages as the processes are executed. This is helpful for", "# The full path of the filename derived from saving it in a", "raise error.LogicFlowError return None def get_job_results(self, job_id: str = None) -> dict: \"\"\"Get", "None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str:", "service which the API URL for should be generated from. Returns ------- url", "the API again. if self.__job_id is not None: return self.__job_id # Call the", "# Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the", "error.WebRequestError( \"The file download link is not giving an acceptable http status code.\"", "= \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API for astrometry.net.", "obtaining it from the API\" \" service.\" ) return None def __del_submission_id(self) ->", "= library.json.json_to_dictionary(json_string=text) # Check if the status of the request provided is a", "base URL for the API which all other service URLs are derived from.", "( submission_id if submission_id is not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id)", "`new_fits`, `new_image`: A new fits file, containing the original image, annotations, and WCS", "{\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the request.", "{ \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\" + boundary", "already been set by obtaining it from the API\" \" service.\" ) return", "web URL is constructed from this string. 
args : dictionary, default = {}", "saved to disk. file_type : str The type of file to be downloaded", "the correspondences between reference stars location in the sky and in pixel space.", "the API system. Parameters ---------- apikey : string The API key for the", "-> None: \"\"\"Assign the job ID, it should only be done once when", "web API using the API key. Parameters ---------- url : string, default =", "\"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API for astrometry.net. This", "None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def", "temporary directory. delete_after : bool, default = True Delete the file after downloading", "value, type) form. # Detailed is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url", "The world corrdinate data table file. - `new_fits`, `new_image`: A new fits file,", "{\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) #", "self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id:", "`wcs`: The world corrdinate data table file. - `new_fits`, `new_image`: A new fits", "has not run yet, None is returned instead. \"\"\" job_id = job_id if", "other API URL links are derived from. This should be used if the", "else self.job_id # Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) try:", "the image upload results. It may be the case that there is not", "The session should be fine. session_key = session return session_key def __get_submission_id(self) ->", "-> str: \"\"\"Generate the correct URL for the desired service. 
Because astrometry.net uses", "+ \"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise, the", "return upload_results def download_result_file( self, filename: str, file_type: str, job_id: str = None", "be downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct", "# If the job ID already has been obtained, then there is no", "key derived when this class is instantiated and # logged into. Use this", "self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because the job is still", "str), # For visibility by the general public. (\"publicly_visible\", \"y\", str), # Image", "return wcs def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper to allow", "ID determined by the file upload is used. Returns ------- None \"\"\" #", "from. If not provided, the ID determined by the file upload is used.", "filename of the file when it is downloaded and saved to disk. file_type", "else: results = {} # For the status. results[\"status\"] = status # For", "a bad web request.\" ) # The logic should not flow beyond this", "path is going to still be in the temporary directory. delete_after : bool,", "for us, we just extract it from the header file using Astropy. Parameters", "- `new_fits`, `new_image`: A new fits file, containing the original image, annotations, and", "send this request to, constructed from the service # desired. api_url = self._generate_service_url(service=service)", "= None ) -> dict: \"\"\"A wrapper function for sending a webrequest to", "preprocessing it a little can help in its # determination. (\"parity\", None, int),", "fits file, containing the original image, annotations, and WCS header information. - `rdls`:", "nature. del self.submission_id, self.job_id # Save the file information. 
self.original_upload_filename = pathname args", "self._generate_service_url(service=service) # If the request requires that a file be send, then it", "id: str) -> str: \"\"\"Construct the file curl from the file type `ftype`", "exists. if job_id is None: raise error.WebRequestError(\"There is no job to download the", "# Download the correlation file to read into a data table. upload_filename =", "\"The API key provided is not a valid key.\" ) else: raise error.WebRequestError(", "must be in the # correct format. Namely, a multipart/form-data format. if file_args", "the ID determined by the file upload is used. temp_filename : string, default", "nova.astrometry.net api service. apikey : string The API key of the user. silent", "it from the API\" \" service.\" ) return None def __del_submission_id(self) -> None:", "\"\"\" submission_id = ( submission_id if submission_id is not None else self.submission_id )", "Returns ------- status : string The status of the submission. \"\"\" submission_id =", "= {}, file_args: dict = None ) -> dict: \"\"\"A wrapper function for", "results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string)", "the proper job ID. job_id = job_id if job_id is not None else", ": str The filename of the file when it is downloaded and saved", "# Use the API key to log in a derive a session key.", "-> None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id = None return None", "None: args.update({keydex: defaultdex}) return args def _send_web_request( self, service: str, args: dict =", "The ID of the job that the results should be obtained from. If", "the radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None,", "apikey : string The API key for the web API service. 
Returns -------", "str = None) -> str: \"\"\"Get the status of a submission specified by", "import opihiexarata.library.hint as hint # The base URL for the API which all", "needed arguments, replacing the defaults with user provided arguments where desired. Parameters ----------", "the desired service. Because astrometry.net uses a convension, we can follow it to", "job. They are, in general: (If the job has not finished yet, None", "args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex: defaultdex}) return args def _send_web_request(", "file upload is used. Returns ------- status : string The status of the", "service : str The service which the API URL for should be generated", "= ( submission_id if submission_id is not None else self.submission_id ) service_string =", "# Delete the temporary file after loading it if desired. if delete_after: os.remove(corr_pathname)", "Delete the file after downloading it to extract its information. Returns ------- correlation_table", "derived when this class is instantiated and # logged into. Use this session", "\"\\n\" + \"--\" + boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"] +", "arguments for uploading files. In (key, value, type) form. # Detailed is also", "return session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission ID from the image", "{\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check if", "For visibility by the general public. (\"publicly_visible\", \"y\", str), # Image scaling parameters,", "processes are executed. This is helpful for debugging or similar processes. Returns -------", "in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string", "The key. 
args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\",", "the API key. Parameters ---------- url : string, default = None The base", "= None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) ->", "session = result.get(\"session\", False) # Check if the session works and that the", "args.update({keydex: defaultdex}) return args def _send_web_request( self, service: str, args: dict = {},", "form should be standard encoded: x-www-form-encoded headers = {} data = {\"request-json\": json_data}", "# For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the", "extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file(", "(\"album\", None, str), ] def __init__(self, url=None, apikey: str = None, silent: bool", "def __get_submission_id(self) -> str: \"\"\"Extract the submission ID from the image upload results.\"\"\"", "\"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data", "wcs def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper to allow for", "Returns ------- None \"\"\" # Get the proper job ID. job_id = job_id", "request. file_args : dictionary, default = None If a file is being uploaded", "file_args : dictionary, default = None If a file is being uploaded instead,", ": string, default = None The ID of the job that the results", "self.submission_id is None: raise error.WebRequestError( \"There cannot be a job id without there", "class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API for astrometry.net. This API", ": dict The result of the submission. 
\"\"\" submission_id = ( submission_id if", "None def __del_job_id(self) -> None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id =", "and the job ID for the uploaded image and saves it. Parameters ----------", "results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] =", ") else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot", "= { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\" +", "not session: raise error.WebRequestError( \"The provided API key did not provide a valid", "of this class is to be simple enough to be understood by others", "Tags : Ditto for tags, but only via machine inputs. - Objects in", "temp_filename : string, default = None The filename that the downloaded correlation file", "url = self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate", "boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\" + boundary + \"\\n\" +", "data=data) # Processing the request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text", ") service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str", "as the processes are executed. This is helpful for debugging or similar processes.", "if self.__job_id is not None: return self.__job_id # Call the API to get", "# association between this class instance and the uploaded file. upload_results = self._send_web_request(\"upload\",", "of the submission. \"\"\" submission_id = ( submission_id if submission_id is not None", "the temporary directory. 
delete_after : bool, default = True Delete the file after", "session_key : string The session key for this login session. \"\"\" # The", "to log into the API system. Parameters ---------- apikey : string The API", "file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text) #", "if temp_filename is not None else upload_filename + \"_wcs\" ) # The full", "table upload is sent to the API, the submission ID is\" \" saved", "be done once when the image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id", "\"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce what the error is.", "which correspond to the job id. Parameters ---------- filename : str The filename", "from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the", "easier # association between this class instance and the uploaded file. upload_results =", "there being a submission for that\" \" job to operate on.\" ) else:", "the upload specifications. Returns ------- results : dictionary The results of the web", "failed, check that the job ID is correct or try\" \" again later.\"", "upload is used. Returns ------- None \"\"\" # Get the proper job ID.", "a new method is made. def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct", "the session works and that the API key given is valid. if not", "dict Arguments which would override the defaults. Returns ------- args : dict The", "of the job that the results should be obtained from. If not provided,", "self.session is not None: args.update({\"session\": self.session}) # The API requires that the data", "None def get_job_results(self, job_id: str = None) -> dict: \"\"\"Get the results of", "a temporary directory. 
Parameters ---------- job_id : string, default = None The ID", "+ \"_corr\" ) # The full path of the filename derived from saving", "is not job yet associated with this submission. \"\"\" # If the job", "directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) #", "None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def", "service.\" ) return None def __del_job_id(self) -> None: \"\"\"Remove the current job ID", "\"\"\"Generate the correct URL for the desired service. Because astrometry.net uses a convension,", ") -> dict: \"\"\"A wrapper function for sending a webrequest to the astrometry.net", "Parameters ---------- job_id : str, default = None The ID of the job", "This error is likely because the job is still in queue. return None", "\"\"\"Generate the arguments for sending a request. This constructs the needed arguments, replacing", "send, then it must be in the # correct format. Namely, a multipart/form-data", "error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce what the error is. if", "None, silent: bool = True) -> None: \"\"\"The instantiation, connecting to the web", "in queue. if len(job_id_list) == 0: self.__job_id = None else: self.__job_id = job_id_list[-1]", ") results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id:", "the original image, annotations, and WCS header information. 
- `rdls`: A table of", "\"\"\"Assign the job ID, it should only be done once when the image", "session : string The session ID of this API connection to astrometry.net \"\"\"", "session return session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission ID from the", "if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A", "we can pull # from the API service. Accommodating for capitalization. file_type =", "of the filename derived from saving it in a temporary # directory. corr_filename", "+ \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" + \"--\" + boundary +", "Parameters ---------- submission_id : str, default = None The ID of the submission.", "of files or images to the API. This also determines the submission ID", "job ID of the\" \" submission is saved here.\" ) __job_id = None", "def get_job_results(self, job_id: str = None) -> dict: \"\"\"Get the results of a", "the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects", "login session. \"\"\" # The key. args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\",", "__doc_job_id = ( \"When file upload or table upload is sent to the", "_send_web_request( self, service: str, args: dict = {}, file_args: dict = None )", "is unknown. raise error.UndiscoveredError(\"Why the web request failed is unknown.\") else: job_id_list =", "sub_id else: raise error.ReadOnlyError( \"The submission ID has already been set by obtaining", "it is downloaded and saved to disk. 
file_type : str The type of", "\"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs) -> dict:", "error.WebRequestError( \"The provided file type to be downloaded is not a valid type", "if self.session is not None: args.update({\"session\": self.session}) # The API requires that the", "initial guess # specified byt he centers, and its maximal deviation as specified", "key of the user. silent : bool, default = True Should there be", "The session ID of this API connection to astrometry.net \"\"\" # The default", "None, delete_after: bool = True ) -> hint.Table: \"\"\"This obtains the table that", "terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For visibility by the general", "arguments being sent over the web request. file_args : dictionary, default = None", "be printed messages as the processes are executed. This is helpful for debugging", "job id without there being a submission for that\" \" job to operate", "is not None else self.job_id # Ensure that the type provided is a", "pull # from the API service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types", "http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are for licensing and distribution terms.", "here.\" ) __job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self,", "service was successful. 
status = job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError(", "file_args = None try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args =", "x-www-form-encoded headers = {} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data =", "!= \"success\": raise error.WebRequestError( \"The job result request failed, check that the job", "status = job_result.get(\"status\") finally: return status # Should not get here. raise error.LogicFlowError", "form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\"", "provided, when known, helps the # processing a little. (\"scale_units\", None, str), (\"scale_type\",", "uploaded. - Tags : Known tagged objects in the image, people inputted. -", "is not None else upload_filename + \"_corr\" ) # The full path of", "job ID, it should only be done once when the image is obtained.\"\"\"", "distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For visibility by the", "then there is no reason to # call the API again. if self.__job_id", "is not a valid type which can\" \" be downloaded, it must be", "Returns the results as well. Parameters ---------- service : string The service which", "saves it. Parameters ---------- pathname : str The pathname of the file to", "\"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) +", "__, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary file after", "astrometry.net. This API does not have the full functionality of the default Python", "if url is not None else _DEFAULT_BASE_URL ) # Use the API key", "be properly processed. 
This is likely\" \" from a bad web request.\" )", "\"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str(", "wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file after loading it if desired.", "None The ID of the submission. If it is not passed, the ID", "not run yet, None is returned instead. \"\"\" job_id = job_id if job_id", "their read-only nature. del self.submission_id, self.job_id # Save the file information. self.original_upload_filename =", "new_value}) elif defaultdex is not None: args.update({keydex: defaultdex}) return args def _send_web_request( self,", "the image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id else: raise", "image. - `corr`: A table of the correspondences between reference stars location in", "a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to send", "giving an acceptable http status code.\" \" It is likely that the job", "of the file to open. The filename is extracted and used as well.", "submission ID has already been set by obtaining it from the API\" \"", "The status of the submission. If the job has not run yet, None", "Get the proper job ID. job_id = job_id if job_id is not None", "False) # Check if the session works and that the API key given", "None, float), (\"scale_est\", None, float), (\"scale_err\", None, float), # These parameters allows for", "request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. try: file =", "reference stars and their pixel locations. It is obtained from the fits corr", "# Get the proper job ID. job_id = job_id if job_id is not", "the web request if it did not fail. 
\"\"\" # Obtain the session", "boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n'", "\"\"\" # The default arguments for uploading files. In (key, value, type) form.", "The service which is being requested. The web URL is constructed from this", "float), (\"radius\", None, float), # Image properties, preprocessing it a little can help", "= submission_results.get(\"jobs\", []) # If there are no jobs, then it is likely", "when the image is obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id else:", "These parameters are for licensing and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\",", "point of this class is to be simple enough to be understood by", "return None def __set_job_id(self, job_id) -> None: \"\"\"Assign the job ID, it should", "desired service URL. Parameters ---------- service : str The service which the API", "def __init__(self, url=None, apikey: str = None, silent: bool = True) -> None:", "an error status message: \\n {message}\".format( message=error_message ) ) else: return result except", "else self.job_id # Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) status", "sure it matches the upload specifications. Returns ------- results : dictionary The results", "instantiated and # logged into. Use this session key for requests. if self.session", "the file type `ftype` and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype,", "AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API for astrometry.net. This API does", "to the web API using the API key. 
Parameters ---------- url : string,", "Returns ------- args : dict The arguments which can be used to send", "is obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The", "job to operate on.\" ) else: # What happened is unknown. raise error.UndiscoveredError(\"Why", "submission_id : str The ID of the submission. If it is not passed,", "`id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id)", "to upload the data. session : string The session ID of this API", "result request failed, check that the job ID is correct or try\" \"", "a convension, we can follow it to obtain the desired service URL. Parameters", "used. Returns ------- status : string The status of the submission. \"\"\" submission_id", "= \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"]", "and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url", "None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers =", "= sub_id else: raise error.ReadOnlyError( \"The submission ID has already been set by", "upload_results return upload_results def download_result_file( self, filename: str, file_type: str, job_id: str =", "are derived from. This should be used if the API is a self-hosted", "of the following: - `wcs`: The world corrdinate data table file. - `new_fits`,", "service. Because astrometry.net uses a convension, we can follow it to obtain the", "the error is. if error_message == \"bad apikey\": raise error.WebRequestError( \"The API key", "{path}\".format(path=pathname)) # Extract the submission id. 
This allows for easier # association between", "desired service. Because astrometry.net uses a convension, we can follow it to obtain", "str) -> str: \"\"\"Construct the file curl from the file type `ftype` and", "can follow it to obtain the desired service URL. Parameters ---------- service :", "They must be reset because of their read-only nature. del self.submission_id, self.job_id #", "again. if self.__job_id is not None: return self.__job_id # Call the API to", "__ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary", "status. status = result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") #", "\"(none)\") # Try to deduce what the error is. if error_message == \"bad", "should not flow beyond this point. raise error.LogicFlowError return None def get_job_results(self, job_id:", "more helpful error message for what is going on. if self.submission_id is None:", "for licensing and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For", "(\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For visibility by the general public.", "filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" + \"--\"", ": dict Arguments which would override the defaults. Returns ------- args : dict", "the file upload. file_args = None try: file = open(pathname, \"rb\") filename =", "provided is a valid type which we can pull # from the API", "of the job. - Calibration : Calibration of the image uploaded. 
- Tags", "error.ReadOnlyError( \"The submission ID has already been set by obtaining it from the", "return None def __del_job_id(self) -> None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id", "job_id) -> None: \"\"\"Assign the job ID, it should only be done once", "is a self-hosted install or has a different web source than nova.astrometry.net. Defaults", "else: # Check the job status. status = job_result.get(\"status\") finally: return status #", "None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because", ": string The status of the submission. \"\"\" submission_id = ( submission_id if", "if self.__submission_id is None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission ID", "the image, people inputted. - Machine Tags : Ditto for tags, but only", "Ditto for tags, but only via machine inputs. - Objects in field :", "to open. The filename is extracted and used as well. Returns ------- results", "The logic should not flow beyond this point. raise error.LogicFlowError return None def", "base url which all other API URL links are derived from. This should", "are executed. This is helpful for debugging or similar processes. Returns ------- None", "args = {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs:", "= kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex:", "- Status : The status of the job. - Calibration : Calibration of", "Otherwise, the form should be standard encoded: x-www-form-encoded headers = {} data =", "correct format. Namely, a multipart/form-data format. if file_args is not None: boundary_key =", "str = None) -> str: \"\"\"Get the status of a job specified by", "table which details the correlation between the coordinates of the stars and their", "The pathname of the file to open. 
The filename is extracted and used", "= None) -> dict: \"\"\"Get the results of a job sent to the", "None, float), # Image properties, preprocessing it a little can help in its", "should be standard encoded: x-www-form-encoded headers = {} data = {\"request-json\": json_data} data", ": dict The results of the astrometry.net job. They are, in general: (If", "typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex: defaultdex}) return args def", "# For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done.", "and the uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return", ") # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load", "the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not None else _DEFAULT_BASE_URL", "\"The file download link is not giving an acceptable http status code.\" \"", "valid status. status = result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\")", "\" submission is saved here.\" ) __job_id = None job_id = property(__get_job_id, __set_job_id,", "Table The table which details the correlation between the coordinates of the stars", "def __get_job_id(self) -> str: \"\"\"Extract the job ID from the image upload results.", ": dictionary, default = None If a file is being uploaded instead, special", "file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file after", "to log in a derive a session key. 
self.session = None session_key =", "str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None, float),", "try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text)", "by the file upload is used. Returns ------- result : dict The result", "elif defaultdex is not None: args.update({keydex: defaultdex}) return args def _send_web_request( self, service:", "self.job_id # Download the correlation file to read into a data table. upload_filename", "used to log in. original_upload_filename : string The original filename that was used", "in the temporary directory. delete_after : bool, default = True Delete the file", "(If the job has not finished yet, None is returned.) - Status :", "self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a more helpful error message for", "= library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary file after loading it", "- `rdls`: A table of reference stars nearby. - `axy`: A table in", "# Check that the service was successful. status = job_result.get(\"status\", False) if status", "used to upload the data. session : string The session ID of this", "ID determined by the file upload is used. temp_filename : string, default =", "to the API, the job ID of the\" \" submission is saved here.\"", "for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self,", "------- results : dict The results of the API call to upload the", "functionality of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. 
The point of this", "the form should be standard encoded: x-www-form-encoded headers = {} data = {\"request-json\":", "is not None else _DEFAULT_BASE_URL ) # Use the API key to log", "(\"scale_err\", None, float), # These parameters allows for the establishment of an initial", "+ \"\\r\\n\" + json_data + \"\\n\" + \"--\" + boundary + \"\\n\" +", "a derive a session key. self.session = None session_key = self.__login(apikey=apikey) self._apikey =", "key to log in a derive a session key. self.session = None session_key", "collection of most everything above. \"\"\" job_id = job_id if job_id is not", "Returns ------- results : dict The results of the API call to upload", "\"\"\"Remove the current submission ID association.\"\"\" self.__submission_id = None return None __doc_submission_id =", "**kwargs : dict Arguments which would override the defaults. Returns ------- args :", "specifications. Returns ------- results : dictionary The results of the web request if", "The arguments which can be used to send the request. \"\"\" args =", "None return None __doc_submission_id = ( \"When file upload or table upload is", "None, bool), # These parameters are needed if being sent instead is an", "is used. Returns ------- status : string The status of the submission. \"\"\"", "check that the job ID is correct or try\" \" again later.\" )", "= ( \"When file upload or table upload is sent to the API,", "= self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check if the session works", "= None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely", "file, the submission and job IDs will change. # They must be reset", "job ID. job_id = job_id if job_id is not None else self.job_id #", "job IDs will change. # They must be reset because of their read-only", "# Check the job status. status = job_result.get(\"status\") finally: return status # Should", "made. 
def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the file curl from", ") return None def __del_job_id(self) -> None: \"\"\"Remove the current job ID association.\"\"\"", "self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments", "= None The filename that the downloaded correlation file will be downloaded as.", "import random import astropy.wcs as ap_wcs import opihiexarata.library as library import opihiexarata.library.error as", "results[\"status\"] = status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string)", "dict: \"\"\"A wrapper to allow for the uploading of files or images to", "the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file", "URL. Parameters ---------- service : str The service which the API URL for", "the file upload is used. Returns ------- None \"\"\" # Get the proper", "full functionality of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of", "= job_id if job_id is not None else self.job_id # Get the result", "{} return None def __login(self, apikey: str) -> str: \"\"\"The method to log", "locations. It is obtained from the fits corr file that is downloaded into", "job_id is not None else self.job_id # Get the result of the job.", "star positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None, list), (\"y\", None,", "as specified # by the radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\",", "_construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the file curl from the file", "queue. 
if len(job_id_list) == 0: self.__job_id = None else: self.__job_id = job_id_list[-1] return", "-> hint.Table: \"\"\"This obtains the table that correlates the location of reference stars", "status : string The status of the submission. \"\"\" submission_id = ( submission_id", "as hint # The base URL for the API which all other service", "hint # The base URL for the API which all other service URLs", "corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\",", "that the service was successful. status = job_result.get(\"status\", False) if status != \"success\":", "str: \"\"\"Extract the job ID from the image upload results. It may be", "the status of a submission specified by its ID. Parameters ---------- submission_id :", "ID from the image upload results. It may be the case that there", ") corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname,", "the field, with annotations. - Info : A collection of most everything above.", "filename derived from saving it in a temporary # directory. corr_filename = library.path.merge_pathname(", "id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type,", "args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) #", "the API. This also determines the submission ID and the job ID for", "to disk. file_type : str The type of file to be downloaded from", "but only via machine inputs. - Objects in field : Known objects in", "self-hosted install or has a different web source than nova.astrometry.net. 
Defaults to the", ") ) else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output", "return None def get_job_results(self, job_id: str = None) -> dict: \"\"\"Get the results", "error.WebRequestError: # This error is likely because the job is still in queue.", ") else: raise error.WebRequestError( \"The server returned an error status message: \\n {message}\".format(", "None, float), (\"center_dec\", None, float), (\"radius\", None, float), # Image properties, preprocessing it", "id without there being a submission for that\" \" job to operate on.\"", "job ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a", "Parameters ---------- service : str The service which the API URL for should", "Machine Tags : Ditto for tags, but only via machine inputs. - Objects", "+ data_post.encode() else: # Otherwise, the form should be standard encoded: x-www-form-encoded headers", "# For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the", "defaultdex is not None: args.update({keydex: defaultdex}) return args def _send_web_request( self, service: str,", "else: raise error.ReadOnlyError( \"The job ID has already been set by obtaining it", "`ftype` and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return", "-> dict: \"\"\"A wrapper to allow for the uploading of files or images", "the job ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make", "self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results def download_result_file( self, filename: str,", "operate on.\" ) else: # What happened is unknown. 
raise error.UndiscoveredError(\"Why the web", "---------- submission_id : str, default = None The ID of the submission. If", "using Astropy. Parameters ---------- job_id : string, default = None The ID of", "Annotations : Known objects in the field, with annotations. - Info : A", "of the image uploaded. - Tags : Known tagged objects in the image,", "Because astrometry.net computes it for us, we just extract it from the header", "(\"publicly_visible\", \"y\", str), # Image scaling parameters, if provided, when known, helps the", "and thus the data files\" \" are not ready.\" ) # Download the", "World Coordinate System solution from it. Because astrometry.net computes it for us, we", "__doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the job ID from the image", "self._generate_upload_args(**kwargs) # Process the file upload. file_args = None try: file = open(pathname,", "at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of this class is to be simple enough to", "keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex) new_value", "URL for the desired service. Because astrometry.net uses a convension, we can follow", "messages as the processes are executed. This is helpful for debugging or similar", "\"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\"", "the job has not run yet, None is returned instead. \"\"\" job_id =", "people inputted. - Machine Tags : Ditto for tags, but only via machine", ": bool, default = True Delete the file after downloading it to extract", "# If the request requires that a file be send, then it must", "---------- pathname : str The pathname of the file to open. The filename", "provided, the ID determined by the file upload is used. 
Returns ------- results", "__doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate the correct URL for the", "The API key of the user. silent : bool, default = True Should", "already has been obtained, then there is no reason to # call the", "from the API service.\" ) return None def __del_job_id(self) -> None: \"\"\"Remove the", "# correct format. Namely, a multipart/form-data format. if file_args is not None: boundary_key", "typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value)", "If not provided, the ID determined by the file upload is used. temp_filename", "------- status : string The status of the submission. If the job has", "= None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) ->", "if submission_id is not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status =", "is not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return", "API service. Parameters ---------- job_id : str, default = None The ID of", "The filename is extracted and used as well. Returns ------- results : dict", "data.encode(\"utf-8\") # Finally send the request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing", "string, default = None The base url which all other API URL links", "Load the header from the file. 
wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) #", "image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id else: raise error.ReadOnlyError(", "str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types:", "\"\\r\\n\" + json_data + \"\\n\" + \"--\" + boundary + \"\\n\" + \"Content-Type:", "-> str: \"\"\"Get the status of a job specified by its ID. Parameters", "the API is a self-hosted install or has a different web source than", "source star positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None, list), (\"y\",", "log into the API system. Parameters ---------- apikey : string The API key", "and saved to disk. file_type : str The type of file to be", "be downloaded is not a valid type which can\" \" be downloaded, it", "used to send the request. \"\"\" args = {} for keydex, defaultdex, typedex", "-> dict: \"\"\"Generate the arguments for sending a request. This constructs the needed", "str = None) -> dict: \"\"\"Get the results of a job sent to", "status = result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try", "which all other service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine):", "- `wcs`: The world corrdinate data table file. - `new_fits`, `new_image`: A new", "= None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session = session_key # Placeholder", "string The service which is being requested. The web URL is constructed from", "different from the # normal API scheme so a new method is made.", "their pixel locations. \"\"\" job_id = job_id if job_id is not None else", "default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. 
The point of this class is to", "defaults with user provided arguments where desired. Parameters ---------- **kwargs : dict Arguments", "= None, temp_filename: str = None, delete_after: bool = True ) -> hint.Table:", "its ID. Parameters ---------- job_id : str, default = None The ID of", "URL for the API which all other service URLs are derived from. _DEFAULT_BASE_URL", "type provided is a valid type which we can pull # from the", "Returns ------- None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if", "x,y list of # source star positions. (\"image_width\", None, int), (\"image_height\", None, int),", "Astropy WCS The world coordinate solution class for the image provided. \"\"\" job_id", "= None If a file is being uploaded instead, special care must be", "if status != \"success\": raise error.WebRequestError( \"The job result request failed, check that", "submission ID, it should only be done once when the image is obtained.\"\"\"", "of the job. service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string)", "specified by its ID. Parameters ---------- submission_id : str The ID of the", "error.LogicFlowError return None def __set_job_id(self, job_id) -> None: \"\"\"Assign the job ID, it", "temp_filename: str = None, delete_after: bool = True ) -> hint.Table: \"\"\"This obtains", "job_id : string, default = None The ID of the job that the", "to log in. original_upload_filename : string The original filename that was used to", "service. apikey : string The API key of the user. silent : bool,", "\"\"\"A python-based wrapper around the web API for astrometry.net. This API does not", "from astrometry.net. It should one of the following: - `wcs`: The world corrdinate", "str The ID of the submission. 
If it is not passed, the ID", "link is not giving an acceptable http status code.\" \" It is likely", "= library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id", "API service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\",", "finally: return status # Should not get here. raise error.LogicFlowError return None def", "provided file type to be downloaded is not a valid type which can\"", "finished yet, None is returned.) - Status : The status of the job.", "temp_filename is not None else upload_filename + \"_wcs\" ) # The full path", "\" saved here.\" ) __submission_id = None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id,", "= \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return results def get_job_status(self, job_id:", "table files which correspond to the job id. Parameters ---------- filename : str", "from the image upload results. It may be the case that there is", "the following: - `wcs`: The world corrdinate data table file. - `new_fits`, `new_image`:", "\"The server returned an error status message: \\n {message}\".format( message=error_message ) ) else:", "None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if url is", "saved here.\" ) __job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def", "def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the file curl from the", "correlation between the coordinates of the stars and their pixel locations. \"\"\" job_id", "headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\"", "so a new method is made. 
def _construct_file_download_url(ftype: str, id: str) -> str:", "service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: #", "def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper to allow for the", "with user provided arguments where desired. Parameters ---------- **kwargs : dict Arguments which", "list), (\"y\", None, list), (\"album\", None, str), ] def __init__(self, url=None, apikey: str", "results of a job sent to the API service. Parameters ---------- job_id :", "\"\"\" # The key. args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session", "# Check if the session works and that the API key given is", "Make a more helpful error message for what is going on. if self.submission_id", "# The URL which to send this request to, constructed from the service", "the location of reference stars and their pixel locations. It is obtained from", "stars and their pixel locations. It is obtained from the fits corr file", "a valid session.\" ) else: # The session should be fine. session_key =", "-> None: \"\"\"Assign the submission ID, it should only be done once when", "file.read()} except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the submission", "service: str, args: dict = {}, file_args: dict = None ) -> dict:", "to extract its information. Returns ------- wcs : Astropy WCS The world coordinate", "data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename", "API call to upload the image. \"\"\" # When uploading a new file,", "loading it if desired. if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id:", "by its ID. 
Parameters ---------- submission_id : str The ID of the submission.", "upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self,", "(\"center_dec\", None, float), (\"radius\", None, float), # Image properties, preprocessing it a little", "of a submission specified by its ID. Parameters ---------- submission_id : str, default", "is. if error_message == \"bad apikey\": raise error.WebRequestError( \"The API key provided is", "is not a valid key.\" ) else: raise error.WebRequestError( \"The server returned an", "def _send_web_request( self, service: str, args: dict = {}, file_args: dict = None", "call the API again. if self.__job_id is not None: return self.__job_id # Call", "image uploaded. - Tags : Known tagged objects in the image, people inputted.", "session key. self.session = None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session =", "------- wcs : Astropy WCS The world coordinate solution class for the image", "This is likely\" \" from a bad web request.\" ) # The logic", "None def get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get the results of", "# All done. return results def get_job_status(self, job_id: str = None) -> str:", "reason to # call the API again. if self.__job_id is not None: return", "default = True Should there be printed messages as the processes are executed.", "api service. apikey : string The API key of the user. silent :", "_DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API for", "del self.submission_id, self.job_id # Save the file information. 
self.original_upload_filename = pathname args =", "ID association.\"\"\" self.__job_id = None return None __doc_job_id = ( \"When file upload", "upload or table upload is sent to the API, the job ID of", "submission_id : str, default = None The ID of the submission. If it", "API for astrometry.net. This API does not have the full functionality of the", "The API requires that the data format must be a JSON based datatype.", "the job ID is correct or try\" \" again later.\" ) else: results", "table file. - `new_fits`, `new_image`: A new fits file, containing the original image,", "no job to download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError(", "else self.job_id # Download the correlation file to read into a data table.", "can help in its # determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\",", "status of the job. - Calibration : Calibration of the image uploaded. -", "raise error.UndiscoveredError(\"Why the web request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", [])", "library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download link is not giving an", "= job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError( \"The job result request", "submission ID association.\"\"\" self.__submission_id = None return None __doc_submission_id = ( \"When file", "has already been set by obtaining it from the API service.\" ) return", "+ \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' +", "\"_corr\" ) # The full path of the filename derived from saving it", "not passed, the ID determined by the file upload is used. Returns -------", "not flow beyond this point. 
raise error.LogicFlowError return None def get_job_results(self, job_id: str", "once when the image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id", "status = None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is", "= self.__login(apikey=apikey) self._apikey = apikey self.session = session_key # Placeholder variables. self.original_upload_filename =", "None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id = None return None __doc_submission_id", "job_id is not None else self.job_id # Ensure that the type provided is", "Before downloading the file, check that the file actually exists. if job_id is", "= None else: # Check the job status. status = job_result.get(\"status\") finally: return", "the result of the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except", "Call the API to get the job ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id", "data_pre = str( \"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version:", "has not finished yet, None is returned.) - Status : The status of", "Construct the URL for the request. It is a little different from the", "then computes World Coordinate System solution from it. Because astrometry.net computes it for", "provided arguments where desired. Parameters ---------- **kwargs : dict Arguments which would override", "API service.\" ) return None def __del_job_id(self) -> None: \"\"\"Remove the current job", "taken to sure it matches the upload specifications. Returns ------- results : dictionary", "not None: return self.__job_id # Call the API to get the job ID.", "yet, None is returned.) - Status : The status of the job. -", "queue. status = None else: # Check the job status. 
status = job_result.get(\"status\")", "the session key derived when this class is instantiated and # logged into.", "submission is saved here.\" ) __job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id,", "because the job is still in queue. status = None else: # Check", "def __del_job_id(self) -> None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id = None", "if provided, when known, helps the # processing a little. (\"scale_units\", None, str),", "sent to the API, the submission ID is\" \" saved here.\" ) __submission_id", "determined by the file upload is used. Returns ------- results : dict The", "source than nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey : string The", "ID of this API connection to astrometry.net \"\"\" # The default arguments for", "filename : str The filename of the file when it is downloaded and", "ID of the submission. If it is not passed, the ID determined by", "error.WebRequestError( \"The job result request failed, check that the job ID is correct", "file after loading it if desired. if delete_after: os.remove(corr_pathname) return wcs def upload_file(self,", "It should one of the following: - `wcs`: The world corrdinate data table", "service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs) ->", "= \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in field. service_string =", "__set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate the correct URL", "obtained, then there is no reason to # call the API again. if", "\"\"\"Construct the file curl from the file type `ftype` and the job id", "file upload or table upload is sent to the API, the job ID", "upload the image. 
\"\"\" # When uploading a new file, the submission and", "being sent instead is an x,y list of # source star positions. (\"image_width\",", "error message for what is going on. if self.submission_id is None: raise error.WebRequestError(", "self.__submission_id is None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission ID has", "= job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id) -> None:", "again later.\" ) else: results = {} # For the status. results[\"status\"] =", "status = None else: # Check the job status. status = job_result.get(\"status\") finally:", "None The filename that the downloaded correlation file will be downloaded as. The", "to astrometry.net \"\"\" # The default arguments for uploading files. In (key, value,", "error is. if error_message == \"bad apikey\": raise error.WebRequestError( \"The API key provided", "list of # source star positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\",", "200: raise error.WebRequestError( \"The file download link is not giving an acceptable http", "= None The ID of the job that the results should be obtained", "job has not run yet, None is returned instead. \"\"\" job_id = job_id", "may be the case that there is not job yet associated with this", "self._image_return_results = upload_results return upload_results def download_result_file( self, filename: str, file_type: str, job_id:", "that the API key given is valid. if not session: raise error.WebRequestError( \"The", "in a derive a session key. self.session = None session_key = self.__login(apikey=apikey) self._apikey", "string, default = None The filename that the downloaded correlation file will be", "and be specialized for OpihiExarata. Attributes ---------- _apikey : string The API key", "key. args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False)", "results. 
It may be the case that there is not job yet associated", "error.LogicFlowError return None def get_job_results(self, job_id: str = None) -> dict: \"\"\"Get the", "# Processing the request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text =", "which can be used to send the request. \"\"\" args = {} for", "parameters are needed if being sent instead is an x,y list of #", "the file upload is used. Returns ------- result : dict The result of", "temp_filename if temp_filename is not None else upload_filename + \"_wcs\" ) # The", "The status of the submission. \"\"\" submission_id = ( submission_id if submission_id is", "little can help in its # determination. (\"parity\", None, int), (\"downsample_factor\", None, int),", "request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result =", "ID for the uploaded image and saves it. Parameters ---------- pathname : str", "\"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided", "result of the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError:", "None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool), # These", "upload. file_args = None try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args", "which is being requested. The web URL is constructed from this string. args", "for the uploading of files or images to the API. This also determines", "was successful. 
status = job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError( \"The", "= ( str(url) if url is not None else _DEFAULT_BASE_URL ) # Use", "obtained from the fits corr file that is downloaded into a temporary directory.", "used. temp_filename : string, default = None The filename that the downloaded wcs", "be in the # correct format. Namely, a multipart/form-data format. if file_args is", "of the user. silent : bool, default = True Should there be printed", "service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id)", "not giving an acceptable http status code.\" \" It is likely that the", "Known tagged objects in the image, people inputted. - Machine Tags : Ditto", "is likely because the job is still in queue. return None # Check", ") # Use the API key to log in a derive a session", "**kwargs) -> dict: \"\"\"Generate the arguments for sending a request. This constructs the", "image, annotations, and WCS header information. - `rdls`: A table of reference stars", "1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" + \"--\"", "= self._send_web_request(service=service_string) # All done. return results def get_job_status(self, job_id: str = None)", "# This error is likely because the job is still in queue. status", "float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None, float), # These parameters", "The arguments being sent over the web request. file_args : dictionary, default =", "self, job_id: str = None, temp_filename: str = None, delete_after: bool = True", "job_id is not None else self.job_id # Download the correlation file to read", "a temporary # directory. 
corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path(", "and used as well. Returns ------- results : dict The results of the", "will change. # They must be reset because of their read-only nature. del", "later.\" ) else: results = {} # For the status. results[\"status\"] = status", "text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data +", "the current job ID association.\"\"\" self.__job_id = None return None __doc_job_id = (", "status of a submission specified by its ID. Parameters ---------- submission_id : str,", "\"The job ID has already been set by obtaining it from the API", "if len(job_id_list) == 0: self.__job_id = None else: self.__job_id = job_id_list[-1] return self.__job_id", "that the job ID is correct or try\" \" again later.\" ) else:", "web request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If there", "url : string, default = None The base url which all other API", "job to download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The", "type which we can pull # from the API service. Accommodating for capitalization.", "of most everything above. \"\"\" job_id = job_id if job_id is not None", "by its ID. Parameters ---------- submission_id : str, default = None The ID", "= None, delete_after: bool = True ) -> hint.Table: \"\"\"This obtains the table", "library.json.json_to_dictionary(json_string=text) # Check if the status of the request provided is a valid", ": string The API key used to log in. original_upload_filename : string The", "self._image_return_results = {} return None def __login(self, apikey: str) -> str: \"\"\"The method", "https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. 
The point of this class is to be simple enough to be", "derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web", "saving it in a temporary # directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" )", "be a job id without there being a submission for that\" \" job", "\"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided file type to", "must be one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the URL for", "error is likely because the job is still in queue. status = None", "True) -> None: \"\"\"The instantiation, connecting to the web API using the API", "API key did not provide a valid session.\" ) else: # The session", "parameters are for licensing and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str),", "= str( \"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\"", "for the desired service. Because astrometry.net uses a convension, we can follow it", "\"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise, the form", "file and then computes World Coordinate System solution from it. Because astrometry.net computes", "= self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) #", "_construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check that the file actually exists.", "from the fits corr file that is downloaded into a temporary directory. Parameters", "valid type which we can pull # from the API service. Accommodating for", "to, constructed from the service # desired. 
api_url = self._generate_service_url(service=service) # If the", "new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex: defaultdex}) return", "raise error.WebRequestError( \"The job result request failed, check that the job ID is", "None, float), # These parameters allows for the establishment of an initial guess", "little different from the # normal API scheme so a new method is", "filename that was used to upload the data. session : string The session", "headers = {} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\")", "in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is", "Delete the temporary file after loading it if desired. if delete_after: os.remove(corr_pathname) return", "(\"invert\", None, bool), # These parameters are needed if being sent instead is", "= \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"]", "None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None,", "# Make a more helpful error message for what is going on. if", "result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check if the session", "of the submission. If it is not passed, the ID determined by the", "request. This constructs the needed arguments, replacing the defaults with user provided arguments", "= data.encode(\"utf-8\") # Finally send the request. 
request = urllib.request.Request(url=api_url, headers=headers, data=data) #", ": dictionary, default = {} The arguments being sent over the web request.", ") data_post = \"\\n\" + \"--\" + boundary + \"--\\n\" data = data_pre.encode()", "If the request requires that a file be send, then it must be", "args, file_args) self._image_return_results = upload_results return upload_results def download_result_file( self, filename: str, file_type:", "Parameters ---------- submission_id : str The ID of the submission. If it is", "annotations, and WCS header information. - `rdls`: A table of reference stars nearby.", "data_post = \"\\n\" + \"--\" + boundary + \"--\\n\" data = data_pre.encode() +", "a valid key.\" ) else: raise error.WebRequestError( \"The server returned an error status", "file actually exists. if job_id is None: raise error.WebRequestError(\"There is no job to", "the file when it is downloaded and saved to disk. file_type : str", ") return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check", "None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The job ID has already been", "= {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value", "+ \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format(", "be reset because of their read-only nature. del self.submission_id, self.job_id # Save the", "file when it is downloaded and saved to disk. file_type : str The", "in queue. return None # Check that the service was successful. status =", "Finally send the request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request.", "are, in general: (If the job has not finished yet, None is returned.)", "printed messages as the processes are executed. 
This is helpful for debugging or", "filename is extracted and used as well. Returns ------- results : dict The", "# For visibility by the general public. (\"publicly_visible\", \"y\", str), # Image scaling", "= True ) -> hint.WCS: \"\"\"This obtains the wcs header file and then", "results : dict The results of the astrometry.net job. They are, in general:", "ID. Parameters ---------- submission_id : str, default = None The ID of the", "\"y\", str), # Image scaling parameters, if provided, when known, helps the #", "return None def __login(self, apikey: str) -> str: \"\"\"The method to log into", "key provided is not a valid key.\" ) else: raise error.WebRequestError( \"The server", "# Process the file upload. file_args = None try: file = open(pathname, \"rb\")", "sub_id) -> None: \"\"\"Assign the submission ID, it should only be done once", "obtained from. If not provided, the ID determined by the file upload is", "the job that the results should be obtained from. If not provided, the", "= self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) -> None:", "useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are for", "dict: \"\"\"A wrapper function for sending a webrequest to the astrometry.net API service.", "after loading it if desired. if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self,", ": string The API key for the web API service. Returns ------- session_key", "it a little can help in its # determination. (\"parity\", None, int), (\"downsample_factor\",", "# Try to deduce what the error is. if error_message == \"bad apikey\":", "header file using Astropy. Parameters ---------- job_id : string, default = None The", "Defaults to the nova.astrometry.net api service. 
apikey : string The API key of", ": dictionary The results of the web request if it did not fail.", "of the\" \" submission is saved here.\" ) __job_id = None job_id =", "the file upload is used. Returns ------- status : string The status of", "'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre = str( \"--\" + boundary + \"\\n\"", "when this class is instantiated and # logged into. Use this session key", "web API for astrometry.net. This API does not have the full functionality of", "upload or table upload is sent to the API, the submission ID is\"", "# For the status. results[\"status\"] = status # For the calibrations. service_string =", "constructed from this string. args : dictionary, default = {} The arguments being", "= str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in", "return None def get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get the results", "it must be one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the URL", "association.\"\"\" self.__submission_id = None return None __doc_submission_id = ( \"When file upload or", "for OpihiExarata. Attributes ---------- _apikey : string The API key used to log", "str = None) -> dict: \"\"\"Get the results of a submission specified by", "he centers, and its maximal deviation as specified # by the radius parameter.", "downloaded and saved to disk. file_type : str The type of file to", "job. - Calibration : Calibration of the image uploaded. - Tags : Known", "is used. Returns ------- status : string The status of the submission. If", "job_id else: raise error.ReadOnlyError( \"The job ID has already been set by obtaining", "file after loading it if desired. 
if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs(", "temp_filename: str = None, delete_after: bool = True ) -> hint.WCS: \"\"\"This obtains", "(\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\",", "install or has a different web source than nova.astrometry.net. Defaults to the nova.astrometry.net", "the coordinates of the stars and their pixel locations. \"\"\" job_id = job_id", "as. The path is going to still be in the temporary directory. delete_after", "else: raise error.ReadOnlyError( \"The submission ID has already been set by obtaining it", "= apikey self.session = session_key # Placeholder variables. self.original_upload_filename = str() self._image_return_results =", "and that the API key given is valid. if not session: raise error.WebRequestError(", "valid key.\" ) else: raise error.WebRequestError( \"The server returned an error status message:", "raise error.WebRequestError( \"The API key provided is not a valid key.\" ) else:", "and then computes World Coordinate System solution from it. Because astrometry.net computes it", "filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File", ": string, default = None The filename that the downloaded wcs file will", "are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the", "# directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename )", "`new_image`: A new fits file, containing the original image, annotations, and WCS header", "job id. 
Parameters ---------- filename : str The filename of the file when", "raise error.WebRequestError(\"There is no job to download the file from.\") if library.http.get_http_status_code(url=file_download_url) !=", "__del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate the correct URL for", "not a valid type which can\" \" be downloaded, it must be one", "urllib.parse import urllib.request import urllib.error import random import astropy.wcs as ap_wcs import opihiexarata.library", "case that there is not job yet associated with this submission. \"\"\" #", "string The status of the submission. \"\"\" submission_id = ( submission_id if submission_id", "raise error.WebRequestError( \"The file download link is not giving an acceptable http status", "the job ID for the uploaded image and saves it. Parameters ---------- pathname", "None \"\"\" # Get the proper job ID. job_id = job_id if job_id", "desired. api_url = self._generate_service_url(service=service) # If the request requires that a file be", "the API key to log in a derive a session key. self.session =", "API URL for should be generated from. Returns ------- url : str The", "information. Returns ------- correlation_table : Table The table which details the correlation between", "int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool),", "\"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] =", "multipart/form-data format. if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in", "str, args: dict = {}, file_args: dict = None ) -> dict: \"\"\"A", "it in a temporary # directory. 
corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname", "ID of the\" \" submission is saved here.\" ) __job_id = None job_id", "uploaded image and saves it. Parameters ---------- pathname : str The pathname of", "= library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does", "this class is instantiated and # logged into. Use this session key for", "likely that the job is still processing and thus the data files\" \"", "job_id : str, default = None The ID of the job that the", "of the stars and their pixel locations. \"\"\" job_id = job_id if job_id", "that the job is still processing and thus the data files\" \" are", "self._send_web_request(service=service_string) # For the objects in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string)", "error import opihiexarata.library.hint as hint # The base URL for the API which", "+ 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" + \"--\" +", "processed. This is likely\" \" from a bad web request.\" ) # The", ": str The pathname of the file to open. The filename is extracted", "= self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because the job is", "to operate on.\" ) else: # What happened is unknown. raise error.UndiscoveredError(\"Why the", "self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data from the file. __,", "= result.get(\"errormessage\", \"(none)\") # Try to deduce what the error is. if error_message", "the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. 
service_string", "-> None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id = None return None", "raise error.LogicFlowError return None def __set_job_id(self, job_id) -> None: \"\"\"Assign the job ID,", "id=job_id) # Before downloading the file, check that the file actually exists. if", "seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of this class is to be simple enough", "of the file when it is downloaded and saved to disk. file_type :", "job_id if job_id is not None else self.job_id # Download the correlation file", "the file to open. The filename is extracted and used as well. Returns", "They are, in general: (If the job has not finished yet, None is", "= session_key # Placeholder variables. self.original_upload_filename = str() self._image_return_results = {} return None", "= None return None __doc_submission_id = ( \"When file upload or table upload", "return correlation_table def get_wcs( self, job_id: str = None, temp_filename: str = None,", "job result request failed, check that the job ID is correct or try\"", "return None def __del_submission_id(self) -> None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id", "\"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\"", "not provide a valid session.\" ) else: # The session should be fine.", "inputted. - Machine Tags : Ditto for tags, but only via machine inputs.", "str), (\"allow_modifications\", \"d\", str), # For visibility by the general public. (\"publicly_visible\", \"y\",", "# Ensure that the type provided is a valid type which we can", "centers, and its maximal deviation as specified # by the radius parameter. (In", "__get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the job ID", "objects in field. 
service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations.", "None else upload_filename + \"_wcs\" ) # The full path of the filename", "results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string)", "general public. (\"publicly_visible\", \"y\", str), # Image scaling parameters, if provided, when known,", "the full functionality of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point", "service # desired. api_url = self._generate_service_url(service=service) # If the request requires that a", "the API which all other service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\"", "if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str = None, temp_filename:", "Attributes ---------- _apikey : string The API key used to log in. original_upload_filename", "temp_filename : string, default = None The filename that the downloaded wcs file", "correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data from the", "coordinate solution class for the image provided. \"\"\" job_id = job_id if job_id", "apikey self.session = session_key # Placeholder variables. self.original_upload_filename = str() self._image_return_results = {}", "else upload_filename + \"_corr\" ) # The full path of the filename derived", "string, default = None The filename that the downloaded wcs file will be", "correlation_table def get_wcs( self, job_id: str = None, temp_filename: str = None, delete_after:", "= job_id if job_id is not None else self.job_id # Ensure that the", "not provided, the ID determined by the file upload is used. 
Returns -------", "from the API service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\",", "\"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data;", "been obtained, then there is no reason to # call the API again.", "a file is being uploaded instead, special care must be taken to sure", "`rdls`: A table of reference stars nearby. - `axy`: A table in of", "api_url = self._generate_service_url(service=service) # If the request requires that a file be send,", "results[\"info\"] = self._send_web_request(service=service_string) # All done. return results def get_job_status(self, job_id: str =", "None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session = session_key # Placeholder variables.", "None: \"\"\"Downloads fits data table files which correspond to the job id. Parameters", "import urllib.error import random import astropy.wcs as ap_wcs import opihiexarata.library as library import", "the objects in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the", "is sent to the API, the job ID of the\" \" submission is", "of the location of stars detected in the provided image. - `corr`: A", "extract it from the header file using Astropy. Parameters ---------- job_id : string,", "result : dict The result of the submission. \"\"\" submission_id = ( submission_id", "self.job_id # Save the file information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) #", "failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If there are no", "well. 
Returns ------- results : dict The results of the API call to", "download_result_file( self, filename: str, file_type: str, job_id: str = None ) -> None:", "correct or try\" \" again later.\" ) else: results = {} # For", "the status. results[\"status\"] = status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"]", "correlation_table : Table The table which details the correlation between the coordinates of", "results of the web request if it did not fail. \"\"\" # Obtain", "# The API requires that the data format must be a JSON based", "Because astrometry.net uses a convension, we can follow it to obtain the desired", "- Annotations : Known objects in the field, with annotations. - Info :", "new method is made. def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the", "by the file upload is used. Returns ------- None \"\"\" # Get the", "similar processes. Returns ------- None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL = (", "= pathname args = self._generate_upload_args(**kwargs) # Process the file upload. file_args = None", "error status message: \\n {message}\".format( message=error_message ) ) else: return result except urllib.error.HTTPError:", "likely still in queue. if len(job_id_list) == 0: self.__job_id = None else: self.__job_id", "default = None The filename that the downloaded wcs file will be downloaded", "+ boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else: #", "service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in field. service_string", "= None, temp_filename: str = None, delete_after: bool = True ) -> hint.WCS:", "the API, the submission ID is\" \" saved here.\" ) __submission_id = None", "after loading it if desired. 
if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname:", "# source star positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None, list),", "return self.__job_id # Call the API to get the job ID. try: submission_results", "done. return results def get_job_status(self, job_id: str = None) -> str: \"\"\"Get the", "instantiation, connecting to the web API using the API key. Parameters ---------- url", "= {} # For the status. results[\"status\"] = status # For the calibrations.", "to still be in the temporary directory. delete_after : bool, default = True", "import opihiexarata.library as library import opihiexarata.library.error as error import opihiexarata.library.hint as hint #", "API to get the job ID. try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except", "image upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def", "file type to be downloaded is not a valid type which can\" \"", "file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary file", "its maximal deviation as specified # by the radius parameter. (In degrees.) (\"center_ra\",", "service.\" ) return None def __del_submission_id(self) -> None: \"\"\"Remove the current submission ID", "the arguments for sending a request. This constructs the needed arguments, replacing the", "float), # These parameters allows for the establishment of an initial guess #", "submission specified by its ID. Parameters ---------- submission_id : str The ID of", "the web API service. 
Returns ------- session_key : string The session key for", "self.__job_id = None else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return None", "for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format(", "the submission ID, it should only be done once when the image is", "is still in queue. return None # Check that the service was successful.", "result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot be properly processed.", "str, id: str) -> str: \"\"\"Construct the file curl from the file type", "file be send, then it must be in the # correct format. Namely,", "session key for this login session. \"\"\" # The key. args = {\"apikey\":", ": dict The arguments which can be used to send the request. \"\"\"", "raise error.WebRequestError( \"There cannot be a job id without there being a submission", "(\"crpix_center\", None, bool), (\"invert\", None, bool), # These parameters are needed if being", "constructs the needed arguments, replacing the defaults with user provided arguments where desired.", "A collection of most everything above. \"\"\" job_id = job_id if job_id is", "job is still in queue. return None # Check that the service was", "is downloaded and saved to disk. file_type : str The type of file", "status : string The status of the submission. If the job has not", "submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a more helpful error", "key used to log in. original_upload_filename : string The original filename that was", "submission_results.get(\"jobs\", []) # If there are no jobs, then it is likely still", "to send this request to, constructed from the service # desired. 
api_url =", "get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str = None, delete_after: bool =", "key for the web API service. Returns ------- session_key : string The session", "The result of the submission. \"\"\" submission_id = ( submission_id if submission_id is", "the submission ID and the job ID for the uploaded image and saves", "upload_filename + \"_wcs\" ) # The full path of the filename derived from", "property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the job", "the API call to upload the image. \"\"\" # When uploading a new", "from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download link is not", "with this submission. \"\"\" # If the job ID already has been obtained,", "Returns ------- result : dict The result of the submission. \"\"\" submission_id =", "The API key for the web API service. Returns ------- session_key : string", "ID has already been set by obtaining it from the API\" \" service.\"", "the case that there is not job yet associated with this submission. \"\"\"", "__set_job_id(self, job_id) -> None: \"\"\"Assign the job ID, it should only be done", "job ID is correct or try\" \" again later.\" ) else: results =", "field. - Annotations : Known objects in the field, with annotations. - Info", "can\" \" be downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types ) )", "= urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check", ": Known tagged objects in the image, people inputted. - Machine Tags :", "the defaults. 
Returns ------- args : dict The arguments which can be used", "self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value})", "sent to the API, the job ID of the\" \" submission is saved", "True ) -> hint.WCS: \"\"\"This obtains the wcs header file and then computes", "filename: str, file_type: str, job_id: str = None ) -> None: \"\"\"Downloads fits", "---------- apikey : string The API key for the web API service. Returns", "int), (\"image_height\", None, int), (\"x\", None, list), (\"y\", None, list), (\"album\", None, str),", "as well. Parameters ---------- service : string The service which is being requested.", "self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header from the file. wcs_header", ") -> None: \"\"\"Downloads fits data table files which correspond to the job", "ID. Parameters ---------- job_id : str, default = None The ID of the", "radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None, float),", "extract its information. Returns ------- wcs : Astropy WCS The world coordinate solution", "\"When file upload or table upload is sent to the API, the submission", "job_id: str = None) -> dict: \"\"\"Get the results of a job sent", "filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) #", "session key for requests. if self.session is not None: args.update({\"session\": self.session}) # The", "This API does not have the full functionality of the default Python client", ": string The API key of the user. silent : bool, default =", "details the correlation between the coordinates of the stars and their pixel locations.", "and their pixel locations. \"\"\" job_id = job_id if job_id is not None", ": str, default = None The ID of the submission. 
If it is", ": A collection of most everything above. \"\"\" job_id = job_id if job_id", "API which all other service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class", "data_post.encode() else: # Otherwise, the form should be standard encoded: x-www-form-encoded headers =", "not None else upload_filename + \"_corr\" ) # The full path of the", "is constructed from this string. args : dictionary, default = {} The arguments", "Returns ------- results : dictionary The results of the web request if it", "information. Returns ------- wcs : Astropy WCS The world coordinate solution class for", "the request. It is a little different from the # normal API scheme", "= True Should there be printed messages as the processes are executed. This", "be downloaded as. The path is going to still be in the temporary", "is an x,y list of # source star positions. (\"image_width\", None, int), (\"image_height\",", "submission ID is\" \" saved here.\" ) __submission_id = None submission_id = property(", "None If a file is being uploaded instead, special care must be taken", "from the file type `ftype` and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format(", "user. silent : bool, default = True Should there be printed messages as", "of the correspondences between reference stars location in the sky and in pixel", "that the file actually exists. if job_id is None: raise error.WebRequestError(\"There is no", "what is going on. if self.submission_id is None: raise error.WebRequestError( \"There cannot be", "user provided arguments where desired. Parameters ---------- **kwargs : dict Arguments which would", "valid. if not session: raise error.WebRequestError( \"The provided API key did not provide", "self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process the file upload. file_args =", "passed, the ID determined by the file upload is used. 
Returns ------- result", "service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id)", "the results should be obtained from. If not provided, the ID determined by", "kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not", "{}, file_args: dict = None ) -> dict: \"\"\"A wrapper function for sending", "str( \"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" +", "the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This", "= {\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname))", "format must be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL", "filename=corr_pathname, extension=1 ) # Delete the temporary file after loading it if desired.", "url=None, apikey: str = None, silent: bool = True) -> None: \"\"\"The instantiation,", "did not fail. \"\"\" # Obtain the session key derived when this class", "job. service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError:", "not finished yet, None is returned.) - Status : The status of the", "instead, special care must be taken to sure it matches the upload specifications.", "str The URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return", "used. Returns ------- results : dict The results of the astrometry.net job. They", "upload is used. temp_filename : string, default = None The filename that the", "the astrometry.net API service. Returns the results as well. 
Parameters ---------- service :", "str(url) if url is not None else _DEFAULT_BASE_URL ) # Use the API", "is not giving an acceptable http status code.\" \" It is likely that", "session_key # Placeholder variables. self.original_upload_filename = str() self._image_return_results = {} return None def", "that the results should be obtained from. If not provided, the ID determined", "return self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id) -> None: \"\"\"Assign the", "---------- url : string, default = None The base url which all other", "range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) }", "data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send", "from the # normal API scheme so a new method is made. def", "+ json_data + \"\\n\" + \"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\"", "self.job_id # Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result", "one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the URL for the request.", "default = None The ID of the job that the results should be", "not job yet associated with this submission. \"\"\" # If the job ID", "\"The web request output cannot be properly processed. This is likely\" \" from", "\"\\n\" + \"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\"", "upload is sent to the API, the job ID of the\" \" submission", "original_upload_filename : string The original filename that was used to upload the data.", "form. # Detailed is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS =", "result.get(\"errormessage\", \"(none)\") # Try to deduce what the error is. 
if error_message ==", "bool), (\"invert\", None, bool), # These parameters are needed if being sent instead", "or table upload is sent to the API, the submission ID is\" \"", "uses a convension, we can follow it to obtain the desired service URL.", "a different web source than nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey", "`corr`: A table of the correspondences between reference stars location in the sky", "service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return results def get_job_status(self,", "web API service. Returns ------- session_key : string The session key for this", "job specified by its ID. Parameters ---------- job_id : str, default = None", "byt he centers, and its maximal deviation as specified # by the radius", "and saves it. Parameters ---------- pathname : str The pathname of the file", "tags, but only via machine inputs. - Objects in field : Known objects", "string The API key used to log in. original_upload_filename : string The original", "in. original_upload_filename : string The original filename that was used to upload the", "file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the status of the request provided", "str), # Image scaling parameters, if provided, when known, helps the # processing", "be standard encoded: x-www-form-encoded headers = {} data = {\"request-json\": json_data} data =", "_apikey : string The API key used to log in. original_upload_filename : string", "is None: raise error.WebRequestError( \"There cannot be a job id without there being", "This constructs the needed arguments, replacing the defaults with user provided arguments where", "------- None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if url", "( str(url) if url is not None else _DEFAULT_BASE_URL ) # Use the", "request output cannot be properly processed. 
This is likely\" \" from a bad", "try\" \" again later.\" ) else: results = {} # For the status.", "None: \"\"\"Assign the job ID, it should only be done once when the", "submission_id = ( submission_id if submission_id is not None else self.submission_id ) service_string", "\"success\": raise error.WebRequestError( \"The job result request failed, check that the job ID", "run yet, None is returned instead. \"\"\" job_id = job_id if job_id is", "fty=valid_api_file_types ) ) # Construct the URL for the request. It is a", "Check that the service was successful. status = job_result.get(\"status\", False) if status !=", "the image field. - Annotations : Known objects in the field, with annotations.", "file. - `new_fits`, `new_image`: A new fits file, containing the original image, annotations,", "else: # Otherwise, the form should be standard encoded: x-www-form-encoded headers = {}", "if being sent instead is an x,y list of # source star positions.", "# For the objects in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) #", "IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the submission id. This", "of file to be downloaded from astrometry.net. It should one of the following:", "for sending a webrequest to the astrometry.net API service. Returns the results as", "= urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. try: file = urllib.request.urlopen( request,", "Obtain the session key derived when this class is instantiated and # logged", ") else: results = {} # For the status. results[\"status\"] = status #", "request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If there are", "API key given is valid. if not session: raise error.WebRequestError( \"The provided API", "by the file upload is used. 
Returns ------- results : dict The results", "a valid status. status = result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\",", "tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in field.", "\" from a bad web request.\" ) # The logic should not flow", "temporary directory. Parameters ---------- job_id : string, default = None The ID of", "reference stars location in the sky and in pixel space. job_id : str,", "dictionary The results of the web request if it did not fail. \"\"\"", "- Info : A collection of most everything above. \"\"\" job_id = job_id", "read into a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = (", "is valid. if not session: raise error.WebRequestError( \"The provided API key did not", "be used to send the request. \"\"\" args = {} for keydex, defaultdex,", "apikey: str) -> str: \"\"\"The method to log into the API system. Parameters", "web request if it did not fail. \"\"\" # Obtain the session key", "server returned an error status message: \\n {message}\".format( message=error_message ) ) else: return", "curl from the file type `ftype` and the job id `id`.\"\"\" url =", "URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url def", "not a valid key.\" ) else: raise error.WebRequestError( \"The server returned an error", "Parameters ---------- url : string, default = None The base url which all", "original image, annotations, and WCS header information. - `rdls`: A table of reference", "ID determined by the file upload is used. Returns ------- result : dict", "bd=boundary ) } data_pre = str( \"--\" + boundary + \"\\n\" + \"Content-Type:", "after downloading it to extract its information. 
Returns ------- correlation_table : Table The", "delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper", "in a temporary # directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname =", "to be understood by others and be specialized for OpihiExarata. Attributes ---------- _apikey", "= {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check", "(\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None, float), # Image properties, preprocessing", "astrometry.net computes it for us, we just extract it from the header file", "connection to astrometry.net \"\"\" # The default arguments for uploading files. In (key,", "== 0: self.__job_id = None else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError", "self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All", "type `ftype` and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id )", "help in its # determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None,", "the processes are executed. This is helpful for debugging or similar processes. Returns", "This also determines the submission ID and the job ID for the uploaded", "this class is to be simple enough to be understood by others and", "the job has not finished yet, None is returned.) - Status : The", "\"The provided API key did not provide a valid session.\" ) else: #", "\"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags. 
service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"]", "\"\"\"Assign the submission ID, it should only be done once when the image", "self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id) ->", "__del_job_id(self) -> None: \"\"\"Remove the current job ID association.\"\"\" self.__job_id = None return", "links are derived from. This should be used if the API is a", "over the web request. file_args : dictionary, default = None If a file", "Known objects in the image field. - Annotations : Known objects in the", "table of the correspondences between reference stars location in the sky and in", "the correlation between the coordinates of the stars and their pixel locations. \"\"\"", "None __doc_job_id = ( \"When file upload or table upload is sent to", "to be downloaded from astrometry.net. It should one of the following: - `wcs`:", "default = None The base url which all other API URL links are", "results of the API call to upload the image. \"\"\" # When uploading", "the ID determined by the file upload is used. Returns ------- result :", "is helpful for debugging or similar processes. Returns ------- None \"\"\" # Defining", "fits data table files which correspond to the job id. Parameters ---------- filename", "when the image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id else:", "URL is constructed from this string. args : dictionary, default = {} The", "cannot be a job id without there being a submission for that\" \"", "= self._send_web_request(service=service_string) # For the objects in field. 
service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] =", "( submission_id if submission_id is not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id)", "= None) -> dict: \"\"\"Get the results of a submission specified by its", "def _generate_service_url(self, service: str) -> str: \"\"\"Generate the correct URL for the desired", ") __job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service:", "disk. file_type : str The type of file to be downloaded from astrometry.net.", "str, **kwargs) -> dict: \"\"\"A wrapper to allow for the uploading of files", "correspond to the job id. Parameters ---------- filename : str The filename of", "there are no jobs, then it is likely still in queue. if len(job_id_list)", "as library import opihiexarata.library.error as error import opihiexarata.library.hint as hint # The base", "the API service. Parameters ---------- job_id : str, default = None The ID", "yet associated with this submission. \"\"\" # If the job ID already has", ") else: # The session should be fine. session_key = session return session_key", ": str The type of file to be downloaded from astrometry.net. It should", "the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags.", "file_args[\"data\"] + data_post.encode() else: # Otherwise, the form should be standard encoded: x-www-form-encoded", "not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result", "from the API\" \" service.\" ) return None def __del_submission_id(self) -> None: \"\"\"Remove", "is not None else self.job_id # Download the correlation file to read into", ") # Delete the temporary file after loading it if desired. 
if delete_after:", "session key derived when this class is instantiated and # logged into. Use", "self.__job_id # Call the API to get the job ID. try: submission_results =", "desired. if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str = None,", "\"\"\"Get the status of a submission specified by its ID. Parameters ---------- submission_id", "= job_id else: raise error.ReadOnlyError( \"The job ID has already been set by", "API, the job ID of the\" \" submission is saved here.\" ) __job_id", "None) -> dict: \"\"\"Get the results of a submission specified by its ID.", "passed, the ID determined by the file upload is used. Returns ------- status", "for the image provided. \"\"\" job_id = job_id if job_id is not None", "dict: \"\"\"Get the results of a submission specified by its ID. Parameters ----------", "self, filename: str, file_type: str, job_id: str = None ) -> None: \"\"\"Downloads", "their pixel locations. It is obtained from the fits corr file that is", "arguments where desired. Parameters ---------- **kwargs : dict Arguments which would override the", "results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"]", "Delete the file after downloading it to extract its information. Returns ------- wcs", "pathname args = self._generate_upload_args(**kwargs) # Process the file upload. file_args = None try:", "# logged into. Use this session key for requests. if self.session is not", "# Should not get here. raise error.LogicFlowError return None def get_submission_results(self, submission_id: str", "acceptable http status code.\" \" It is likely that the job is still", "data files\" \" are not ready.\" ) # Download the file. 
library.http.download_file_from_url( url=file_download_url,", "determines the submission ID and the job ID for the uploaded image and", "default = None If a file is being uploaded instead, special care must", "for tags, but only via machine inputs. - Objects in field : Known", "to be downloaded is not a valid type which can\" \" be downloaded,", "temporary file after loading it if desired. if delete_after: os.remove(corr_pathname) return wcs def", "job_id: str = None) -> str: \"\"\"Get the status of a job specified", "from it. Because astrometry.net computes it for us, we just extract it from", "valid_api_file_types: raise error.WebRequestError( \"The provided file type to be downloaded is not a", "file, check that the file actually exists. if job_id is None: raise error.WebRequestError(\"There", "image. \"\"\" # When uploading a new file, the submission and job IDs", "between the coordinates of the stars and their pixel locations. \"\"\" job_id =", "the desired service URL. Parameters ---------- service : str The service which the", "open. The filename is extracted and used as well. Returns ------- results :", "correspondences between reference stars location in the sky and in pixel space. job_id", "{fty}\".format( fty=valid_api_file_types ) ) # Construct the URL for the request. It is", "None: \"\"\"The instantiation, connecting to the web API using the API key. Parameters", "wrapper function for sending a webrequest to the astrometry.net API service. Returns the", "special care must be taken to sure it matches the upload specifications. Returns", "is used. temp_filename : string, default = None The filename that the downloaded", "API key provided is not a valid key.\" ) else: raise error.WebRequestError( \"The", "submission_id = ( submission_id if submission_id is not None else self.submission_id ) results", "None else self.job_id # Ensure that the type provided is a valid type", "returned instead. 
\"\"\" job_id = job_id if job_id is not None else self.job_id", "solution class for the image provided. \"\"\" job_id = job_id if job_id is", "try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a more helpful", "str = None, temp_filename: str = None, delete_after: bool = True ) ->", "the table that correlates the location of reference stars and their pixel locations.", "maximal deviation as specified # by the radius parameter. (In degrees.) (\"center_ra\", None,", "flow beyond this point. raise error.LogicFlowError return None def get_job_results(self, job_id: str =", "the location of stars detected in the provided image. - `corr`: A table", "provided. \"\"\" job_id = job_id if job_id is not None else self.job_id #", "is not passed, the ID determined by the file upload is used. Returns", "send the request. \"\"\" args = {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS:", "upload_filename + \"_corr\" ) # The full path of the filename derived from", "of the request provided is a valid status. status = result.get(\"status\") if status", "uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results def", "determined by the file upload is used. Returns ------- result : dict The", "file upload is used. Returns ------- None \"\"\" # Get the proper job", "If not provided, the ID determined by the file upload is used. Returns", "file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download link is", "For the objects in field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For", "to the nova.astrometry.net api service. 
apikey : string The API key of the", "self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str = None,", "this class instance and the uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results", "\"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError( \"The provided file", "<reponame>psmd-iberutaru/OpihiExarata import os import urllib.parse import urllib.request import urllib.error import random import astropy.wcs", "submission ID from the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\",", "( temp_filename if temp_filename is not None else upload_filename + \"_wcs\" ) #", "machine inputs. - Objects in field : Known objects in the image field.", ") # Load the data from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname,", "boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = {", "if self.__job_id is None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The job ID", "The table which details the correlation between the coordinates of the stars and", "Use this session key for requests. if self.session is not None: args.update({\"session\": self.session})", "valid session.\" ) else: # The session should be fine. session_key = session", "urllib.error import random import astropy.wcs as ap_wcs import opihiexarata.library as library import opihiexarata.library.error", "should only be done once when the image is obtained.\"\"\" if self.__submission_id is", "\"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not", "file is being uploaded instead, special care must be taken to sure it", "derive a session key. 
self.session = None session_key = self.__login(apikey=apikey) self._apikey = apikey", "one of the following: - `wcs`: The world corrdinate data table file. -", "None: return self.__job_id # Call the API to get the job ID. try:", "the establishment of an initial guess # specified byt he centers, and its", ") # The full path of the filename derived from saving it in", "which to send this request to, constructed from the service # desired. api_url", "upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results def download_result_file( self,", "downloaded correlation file will be downloaded as. The path is going to still", "service: str) -> str: \"\"\"Generate the correct URL for the desired service. Because", "that\" \" job to operate on.\" ) else: # What happened is unknown.", "will be downloaded as. The path is going to still be in the", "its # determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\",", "location in the sky and in pixel space. job_id : str, default =", "get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get the status of a submission", "is likely that the job is still processing and thus the data files\"", "in its # determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float),", "the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs)", "using the API key. Parameters ---------- url : string, default = None The", "-> str: \"\"\"Extract the job ID from the image upload results. It may", "error.UndiscoveredError(\"Why the web request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) #", "set by obtaining it from the API service.\" ) return None def __del_job_id(self)", "Try to deduce what the error is. 
if error_message == \"bad apikey\": raise", "- Objects in field : Known objects in the image field. - Annotations", "= self._generate_upload_args(**kwargs) # Process the file upload. file_args = None try: file =", "derived from. This should be used if the API is a self-hosted install", "a valid type which we can pull # from the API service. Accommodating", "service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is", "\"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because", "library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename is not None else", "API\" \" service.\" ) return None def __del_submission_id(self) -> None: \"\"\"Remove the current", "files. In (key, value, type) form. # Detailed is also their useage cases", "message for what is going on. if self.submission_id is None: raise error.WebRequestError( \"There", "raise error.ReadOnlyError( \"The job ID has already been set by obtaining it from", "Detailed is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ #", "and in pixel space. job_id : str, default = None The ID of", "we can follow it to obtain the desired service URL. Parameters ---------- service", "self.job_id # Ensure that the type provided is a valid type which we", "(\"radius\", None, float), # Image properties, preprocessing it a little can help in", "must be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which", "properly processed. This is likely\" \" from a bad web request.\" ) #", "The API key used to log in. original_upload_filename : string The original filename", "for easier # association between this class instance and the uploaded file. 
upload_results", "# This error is likely because the job is still in queue. return", "request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the", "except error.WebRequestError: # Make a more helpful error message for what is going", "is still in queue. status = None else: # Check the job status.", "table of reference stars nearby. - `axy`: A table in of the location", "= None The base url which all other API URL links are derived", "\"d\", str), # For visibility by the general public. (\"publicly_visible\", \"y\", str), #", "would override the defaults. Returns ------- args : dict The arguments which can", "python-based wrapper around the web API for astrometry.net. This API does not have", "str, default = None The ID of the submission. If it is not", "the nova.astrometry.net api service. apikey : string The API key of the user.", "return status def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str = None,", "job_id if job_id is not None else self.job_id # Get the result of", "file curl from the file type `ftype` and the job id `id`.\"\"\" url", "error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the submission id. This allows for", "than nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey : string The API", "to upload the image. \"\"\" # When uploading a new file, the submission", "-> dict: \"\"\"Get the results of a submission specified by its ID. Parameters", "str = None ) -> None: \"\"\"Downloads fits data table files which correspond", "API scheme so a new method is made. def _construct_file_download_url(ftype: str, id: str)", "results should be obtained from. 
If not provided, the ID determined by the", "import opihiexarata.library.error as error import opihiexarata.library.hint as hint # The base URL for", "True Should there be printed messages as the processes are executed. This is", "service. Parameters ---------- job_id : str, default = None The ID of the", "be downloaded from astrometry.net. It should one of the following: - `wcs`: The", "__login(self, apikey: str) -> str: \"\"\"The method to log into the API system.", "file to open. The filename is extracted and used as well. Returns -------", "of stars detected in the provided image. - `corr`: A table of the", "instead. \"\"\" job_id = job_id if job_id is not None else self.job_id #", "-> None: \"\"\"Downloads fits data table files which correspond to the job id.", "\"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except IOError: raise", "None) -> str: \"\"\"Get the status of a job specified by its ID.", "Astropy. Parameters ---------- job_id : string, default = None The ID of the", "corr file that is downloaded into a temporary directory. Parameters ---------- job_id :", "arguments for sending a request. This constructs the needed arguments, replacing the defaults", "urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check if", "( temp_filename if temp_filename is not None else upload_filename + \"_corr\" ) #", "sent instead is an x,y list of # source star positions. (\"image_width\", None,", "else: # The session should be fine. session_key = session return session_key def", "= self.ASTROMETRY_BASE_API_URL + service return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the", "- Tags : Known tagged objects in the image, people inputted. 
- Machine", "= typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex: defaultdex}) return args", "ID from the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None)", "file, containing the original image, annotations, and WCS header information. - `rdls`: A", "used. Returns ------- None \"\"\" # Get the proper job ID. job_id =", "raise error.WebRequestError( \"The provided file type to be downloaded is not a valid", "results def get_job_status(self, job_id: str = None) -> str: \"\"\"Get the status of", "data table file. - `new_fits`, `new_image`: A new fits file, containing the original", "import os import urllib.parse import urllib.request import urllib.error import random import astropy.wcs as", "this request to, constructed from the service # desired. api_url = self._generate_service_url(service=service) #", ": str The ID of the submission. If it is not passed, the", "table that correlates the location of reference stars and their pixel locations. It", "Image scaling parameters, if provided, when known, helps the # processing a little.", "used. temp_filename : string, default = None The filename that the downloaded correlation", "file information. 
self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process the file upload.", "\"data\": file.read()} except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the", "str = None, delete_after: bool = True ) -> hint.Table: \"\"\"This obtains the", ") def __get_job_id(self) -> str: \"\"\"Extract the job ID from the image upload", "error.WebRequestError( \"There cannot be a job id without there being a submission for", "being a submission for that\" \" job to operate on.\" ) else: #", "\"\"\" args = {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in", "service : string The service which is being requested. The web URL is", "field, with annotations. - Info : A collection of most everything above. \"\"\"", "to deduce what the error is. if error_message == \"bad apikey\": raise error.WebRequestError(", "= \"\\n\" + \"--\" + boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"]", "# These parameters allows for the establishment of an initial guess # specified", "return None # Check that the service was successful. status = job_result.get(\"status\", False)", "by obtaining it from the API service.\" ) return None def __del_job_id(self) ->", "Calibration : Calibration of the image uploaded. - Tags : Known tagged objects", "or has a different web source than nova.astrometry.net. Defaults to the nova.astrometry.net api", "keydex in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex", "os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str = None, temp_filename: str =", "string. args : dictionary, default = {} The arguments being sent over the", "if the status of the request provided is a valid status. status =", "it to extract its information. 
Returns ------- correlation_table : Table The table which", "== \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce what the error", "job sent to the API service. Parameters ---------- job_id : str, default =", "# Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result =", "should only be done once when the image is obtained.\"\"\" if self.__job_id is", "used. Returns ------- status : string The status of the submission. If the", "# These parameters are needed if being sent instead is an x,y list", "= session return session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission ID from", "the web API for astrometry.net. This API does not have the full functionality", "self.__submission_id = None return None __doc_submission_id = ( \"When file upload or table", "be in the temporary directory. delete_after : bool, default = True Delete the", "from a bad web request.\" ) # The logic should not flow beyond", "For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return", "The full path of the filename derived from saving it in a temporary", "table upload is sent to the API, the job ID of the\" \"", "wrapper around the web API for astrometry.net. This API does not have the", "url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for sending a request.", "status != \"success\": raise error.WebRequestError( \"The job result request failed, check that the", "# call the API again. if self.__job_id is not None: return self.__job_id #", "json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the request. request", "the file upload is used. Returns ------- results : dict The results of", "status of the submission. 
If the job has not run yet, None is", "These parameters are needed if being sent instead is an x,y list of", "the header file using Astropy. Parameters ---------- job_id : string, default = None", "\"There cannot be a job id without there being a submission for that\"", "a webrequest to the astrometry.net API service. Returns the results as well. Parameters", "\"\"\" # If the job ID already has been obtained, then there is", "downloading it to extract its information. Returns ------- wcs : Astropy WCS The", "the file information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process the file", "without there being a submission for that\" \" job to operate on.\" )", "does not have the full functionality of the default Python client seen at", "that was used to upload the data. session : string The session ID", "None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None, float), # These", "not in valid_api_file_types: raise error.WebRequestError( \"The provided file type to be downloaded is", "image, people inputted. - Machine Tags : Ditto for tags, but only via", "system. Parameters ---------- apikey : string The API key for the web API", "None def __del_submission_id(self) -> None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id =", "the request provided is a valid status. status = result.get(\"status\") if status ==", "instance and the uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results", "specified byt he centers, and its maximal deviation as specified # by the", "results : dict The results of the API call to upload the image.", "\" job to operate on.\" ) else: # What happened is unknown. raise", "None else upload_filename + \"_corr\" ) # The full path of the filename", "to get the job ID. 
try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError:", "class is instantiated and # logged into. Use this session key for requests.", "session should be fine. session_key = session return session_key def __get_submission_id(self) -> str:", "going on. if self.submission_id is None: raise error.WebRequestError( \"There cannot be a job", "being sent over the web request. file_args : dictionary, default = None If", "be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to", "it did not fail. \"\"\" # Obtain the session key derived when this", "None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str:", "text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the status of the", "current job ID association.\"\"\" self.__job_id = None return None __doc_job_id = ( \"When", "a little can help in its # determination. (\"parity\", None, int), (\"downsample_factor\", None,", "None: raise error.WebRequestError(\"There is no job to download the file from.\") if library.http.get_http_status_code(url=file_download_url)", "the status of a job specified by its ID. Parameters ---------- job_id :", "Returns ------- wcs : Astropy WCS The world coordinate solution class for the", "provided is not a valid key.\" ) else: raise error.WebRequestError( \"The server returned", "is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These", "the ID determined by the file upload is used. Returns ------- status :", "not provided, the ID determined by the file upload is used. temp_filename :", "The service which the API URL for should be generated from. Returns -------", "dict: \"\"\"Generate the arguments for sending a request. 
This constructs the needed arguments,", "Objects in field : Known objects in the image field. - Annotations :", "error.WebRequestError( \"The server returned an error status message: \\n {message}\".format( message=error_message ) )", "it. Parameters ---------- pathname : str The pathname of the file to open.", "None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission ID has already been", "This should be used if the API is a self-hosted install or has", "datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to send this request to,", "A table in of the location of stars detected in the provided image.", "return args def _send_web_request( self, service: str, args: dict = {}, file_args: dict", "= job_result.get(\"status\") finally: return status # Should not get here. raise error.LogicFlowError return", "connecting to the web API using the API key. Parameters ---------- url :", "\"\"\"Downloads fits data table files which correspond to the job id. Parameters ----------", "public. (\"publicly_visible\", \"y\", str), # Image scaling parameters, if provided, when known, helps", "be understood by others and be specialized for OpihiExarata. Attributes ---------- _apikey :", "upload specifications. Returns ------- results : dictionary The results of the web request", "Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data", "sent to the API service. Parameters ---------- job_id : str, default = None", "and its maximal deviation as specified # by the radius parameter. (In degrees.)", "and WCS header information. - `rdls`: A table of reference stars nearby. -", "\"\"\"Extract the job ID from the image upload results. It may be the", "a new file, the submission and job IDs will change. # They must", "be generated from. 
Returns ------- url : str The URL for the service.", "enough to be understood by others and be specialized for OpihiExarata. Attributes ----------", "results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string)", "WCS header information. - `rdls`: A table of reference stars nearby. - `axy`:", "helps the # processing a little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\",", "service which is being requested. The web URL is constructed from this string.", "None The ID of the job that the results should be obtained from.", "temporary # directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename", "correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header from the", "request. It is a little different from the # normal API scheme so", "apikey} result = self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check if the", "job that the results should be obtained from. If not provided, the ID", "provided, the ID determined by the file upload is used. Returns ------- status", "Process the file upload. file_args = None try: file = open(pathname, \"rb\") filename", "obtaining it from the API service.\" ) return None def __del_job_id(self) -> None:", "used. Returns ------- result : dict The result of the submission. \"\"\" submission_id", "for requests. if self.session is not None: args.update({\"session\": self.session}) # The API requires", "the results of a job sent to the API service. Parameters ---------- job_id", "is likely because the job is still in queue. 
status = None else:", "= \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely", ") return None def __del_submission_id(self) -> None: \"\"\"Remove the current submission ID association.\"\"\"", "What happened is unknown. raise error.UndiscoveredError(\"Why the web request failed is unknown.\") else:", "Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\")", "parameters, if provided, when known, helps the # processing a little. (\"scale_units\", None,", ": str, default = None The ID of the job that the results", "library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary file after loading it if", "str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float),", "not exist: {path}\".format(path=pathname)) # Extract the submission id. This allows for easier #", "bool), # These parameters are needed if being sent instead is an x,y", "already been set by obtaining it from the API service.\" ) return None", "provided, the ID determined by the file upload is used. Returns ------- None", "are no jobs, then it is likely still in queue. if len(job_id_list) ==", "the general public. (\"publicly_visible\", \"y\", str), # Image scaling parameters, if provided, when", "header information. - `rdls`: A table of reference stars nearby. - `axy`: A", "actually exists. if job_id is None: raise error.WebRequestError(\"There is no job to download", "filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data from the file. __, correlation_table", "of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the URL for the request. It", "is saved here.\" ) __job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id)", "correct URL for the desired service. 
Because astrometry.net uses a convension, we can", "the type provided is a valid type which we can pull # from", "is obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The", "current submission ID association.\"\"\" self.__submission_id = None return None __doc_submission_id = ( \"When", "from. Returns ------- url : str The URL for the service. \"\"\" url", "\" again later.\" ) else: results = {} # For the status. results[\"status\"]", "job is still processing and thus the data files\" \" are not ready.\"", "needed if being sent instead is an x,y list of # source star", "float), # Image properties, preprocessing it a little can help in its #", "API key of the user. silent : bool, default = True Should there", "class is to be simple enough to be understood by others and be", "a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if", "defaultdex}) return args def _send_web_request( self, service: str, args: dict = {}, file_args:", "+ \"--\" + boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode()", "request. \"\"\" args = {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex", "class instance and the uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results =", "status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename:", ": string The session ID of this API connection to astrometry.net \"\"\" #", "(In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None, float), # Image", "ID is correct or try\" \" again later.\" ) else: results = {}", "works and that the API key given is valid. if not session: raise", "be specialized for OpihiExarata. 
Attributes ---------- _apikey : string The API key used", "opihiexarata.library.error as error import opihiexarata.library.hint as hint # The base URL for the", "for the establishment of an initial guess # specified byt he centers, and", "Should there be printed messages as the processes are executed. This is helpful", "else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation(", "annotations. - Info : A collection of most everything above. \"\"\" job_id =", "a request. This constructs the needed arguments, replacing the defaults with user provided", "---------- job_id : string, default = None The ID of the job that", "API key. Parameters ---------- url : string, default = None The base url", "filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file.", "self.__login(apikey=apikey) self._apikey = apikey self.session = session_key # Placeholder variables. self.original_upload_filename = str()", "\"\"\" # When uploading a new file, the submission and job IDs will", "corrdinate data table file. - `new_fits`, `new_image`: A new fits file, containing the", "image upload results. It may be the case that there is not job", "upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename is not", "a submission for that\" \" job to operate on.\" ) else: # What", "from this string. args : dictionary, default = {} The arguments being sent", "should be obtained from. If not provided, the ID determined by the file", "result = library.json.json_to_dictionary(json_string=text) # Check if the status of the request provided is", "API service. 
Returns ------- session_key : string The session key for this login", "image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) ->", "= True Delete the file after downloading it to extract its information. Returns", "the job ID of the\" \" submission is saved here.\" ) __job_id =", "session. \"\"\" # The key. args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args)", "If there are no jobs, then it is likely still in queue. if", "unknown. raise error.UndiscoveredError(\"Why the web request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\",", "None: args.update({\"session\": self.session}) # The API requires that the data format must be", "(\"scale_est\", None, float), (\"scale_err\", None, float), # These parameters allows for the establishment", "not None: args.update({\"session\": self.session}) # The API requires that the data format must", "default = None The ID of the submission. If it is not passed,", "1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" )", "# Load the data from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1", "downloaded into a temporary directory. Parameters ---------- job_id : string, default = None", "json_data + \"\\n\" + \"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" +", ": dict The results of the API call to upload the image. 
\"\"\"", "Parameters ---------- job_id : string, default = None The ID of the job", "------- correlation_table : Table The table which details the correlation between the coordinates", ") __submission_id = None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def", "(\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None, float), #", "the service # desired. api_url = self._generate_service_url(service=service) # If the request requires that", "For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags.", "results of a submission specified by its ID. Parameters ---------- submission_id : str", "_generate_service_url(self, service: str) -> str: \"\"\"Generate the correct URL for the desired service.", "Parameters ---------- **kwargs : dict Arguments which would override the defaults. Returns -------", "\"\"\" # Get the proper job ID. job_id = job_id if job_id is", "if the API is a self-hosted install or has a different web source", "the file curl from the file type `ftype` and the job id `id`.\"\"\"", "an initial guess # specified byt he centers, and its maximal deviation as", "except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract the submission id.", "!= 200: raise error.WebRequestError( \"The file download link is not giving an acceptable", "astrometry.net uses a convension, we can follow it to obtain the desired service", "args.update({\"session\": self.session}) # The API requires that the data format must be a", "also determines the submission ID and the job ID for the uploaded image", "session.\" ) else: # The session should be fine. 
session_key = session return", "\"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] =", "-> str: \"\"\"The method to log into the API system. Parameters ---------- apikey", "------- status : string The status of the submission. \"\"\" submission_id = (", "str = None, silent: bool = True) -> None: \"\"\"The instantiation, connecting to", "ID is\" \" saved here.\" ) __submission_id = None submission_id = property( __get_submission_id,", "__init__(self, url=None, apikey: str = None, silent: bool = True) -> None: \"\"\"The", ") # Construct the URL for the request. It is a little different", "ID determined by the file upload is used. Returns ------- status : string", "message: \\n {message}\".format( message=error_message ) ) else: return result except urllib.error.HTTPError: raise error.WebRequestError(", "the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the data from", "is not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return", "\"\"\" job_id = job_id if job_id is not None else self.job_id # Get", "that is downloaded into a temporary directory. Parameters ---------- job_id : string, default", "delete_after : bool, default = True Delete the file after downloading it to", "None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None,", "be fine. session_key = session return session_key def __get_submission_id(self) -> str: \"\"\"Extract the", "here. raise error.LogicFlowError return None def get_submission_results(self, submission_id: str = None) -> dict:", "args = self._generate_upload_args(**kwargs) # Process the file upload. 
file_args = None try: file", "a submission specified by its ID. Parameters ---------- submission_id : str, default =", "file after downloading it to extract its information. Returns ------- correlation_table : Table", "pathname: str, **kwargs) -> dict: \"\"\"A wrapper to allow for the uploading of", "error.LogicFlowError return None def get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get the", "file upload. file_args = None try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname)", "the API service.\" ) return None def __del_job_id(self) -> None: \"\"\"Remove the current", "must be taken to sure it matches the upload specifications. Returns ------- results", "sky and in pixel space. job_id : str, default = None The ID", "because the job is still in queue. return None # Check that the", "== \"bad apikey\": raise error.WebRequestError( \"The API key provided is not a valid", "job yet associated with this submission. \"\"\" # If the job ID already", "the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download link", "= self._send_web_request(service=service_string) # For the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string)", "allows for easier # association between this class instance and the uploaded file.", "= self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results def download_result_file( self, filename:", "the data files\" \" are not ready.\" ) # Download the file. library.http.download_file_from_url(", "The results of the API call to upload the image. \"\"\" # When", "sent over the web request. file_args : dictionary, default = None If a", "The filename that the downloaded wcs file will be downloaded as. 
The path", "# The base URL for the API which all other service URLs are", "(\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool), #", "None, float), (\"scale_err\", None, float), # These parameters allows for the establishment of", "the\" \" submission is saved here.\" ) __job_id = None job_id = property(__get_job_id,", "ID, it should only be done once when the image is obtained.\"\"\" if", "the astrometry.net job. They are, in general: (If the job has not finished", "determined by the file upload is used. Returns ------- status : string The", "self.job_id # Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) status =", "self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the", "pathname of the file to open. The filename is extracted and used as", "result of the job. service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result =", "downloaded wcs file will be downloaded as. The path is going to still", "a valid type which can\" \" be downloaded, it must be one of:", "# http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are for licensing and distribution", "_DEFAULT_BASE_URL ) # Use the API key to log in a derive a", "# specified byt he centers, and its maximal deviation as specified # by", "is not None: return self.__job_id # Call the API to get the job", "no jobs, then it is likely still in queue. if len(job_id_list) == 0:", "job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id) -> None: \"\"\"Assign", "error.WebRequestError( \"The API key provided is not a valid key.\" ) else: raise", "the job is still in queue. 
status = None else: # Check the", "= results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str", "if submission_id is not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result =", "job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error", "= urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the request. request = urllib.request.Request(url=api_url,", "None, temp_filename: str = None, delete_after: bool = True ) -> hint.Table: \"\"\"This", "API does not have the full functionality of the default Python client seen", "a little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None,", "file to be downloaded from astrometry.net. It should one of the following: -", "ID association.\"\"\" self.__submission_id = None return None __doc_submission_id = ( \"When file upload", "is obtained from the fits corr file that is downloaded into a temporary", ": str The URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service", "downloading the file, check that the file actually exists. if job_id is None:", "job_id if job_id is not None else self.job_id # Ensure that the type", "the API URL for should be generated from. Returns ------- url : str", "is a valid status. status = result.get(\"status\") if status == \"error\": error_message =", "None, list), (\"album\", None, str), ] def __init__(self, url=None, apikey: str = None,", "file after downloading it to extract its information. Returns ------- wcs : Astropy", "The status of the job. - Calibration : Calibration of the image uploaded.", "headers=headers, data=data) # Processing the request. 
try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT )", "the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string", "= {} return None def __login(self, apikey: str) -> str: \"\"\"The method to", "status # Should not get here. raise error.LogicFlowError return None def get_submission_results(self, submission_id:", "of a submission specified by its ID. Parameters ---------- submission_id : str The", "If the job ID already has been obtained, then there is no reason", "to the API. This also determines the submission ID and the job ID", "\"\"\"This obtains the table that correlates the location of reference stars and their", "in general: (If the job has not finished yet, None is returned.) -", "id. Parameters ---------- filename : str The filename of the file when it", "= True ) -> hint.Table: \"\"\"This obtains the table that correlates the location", "if not session: raise error.WebRequestError( \"The provided API key did not provide a", "# When uploading a new file, the submission and job IDs will change.", "new fits file, containing the original image, annotations, and WCS header information. -", "is instantiated and # logged into. Use this session key for requests. if", "if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce what", "API connection to astrometry.net \"\"\" # The default arguments for uploading files. In", "the request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result", "constructed from the service # desired. 
api_url = self._generate_service_url(service=service) # If the request", "None, temp_filename: str = None, delete_after: bool = True ) -> hint.WCS: \"\"\"This", "This allows for easier # association between this class instance and the uploaded", "the job id. Parameters ---------- filename : str The filename of the file", "astrometry.net. It should one of the following: - `wcs`: The world corrdinate data", "{} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value =", "being requested. The web URL is constructed from this string. args : dictionary,", "astrometry.net job. They are, in general: (If the job has not finished yet,", "raise error.WebRequestError( \"The web request output cannot be properly processed. This is likely\"", "the uploading of files or images to the API. This also determines the", "temp_filename if temp_filename is not None else upload_filename + \"_corr\" ) # The", "it should only be done once when the image is obtained.\"\"\" if self.__job_id", "# determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None,", "\" be downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types ) ) #", "it must be in the # correct format. Namely, a multipart/form-data format. if", "uploaded instead, special care must be taken to sure it matches the upload", "containing the original image, annotations, and WCS header information. - `rdls`: A table", "the web request failed is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If", "world corrdinate data table file. - `new_fits`, `new_image`: A new fits file, containing", "-> str: \"\"\"Get the status of a submission specified by its ID. 
Parameters", "request requires that a file be send, then it must be in the", "code.\" \" It is likely that the job is still processing and thus", "job_id: str = None ) -> None: \"\"\"Downloads fits data table files which", "ready.\" ) # Download the file. library.http.download_file_from_url( url=file_download_url, filename=filename, overwrite=True ) return None", "association.\"\"\" self.__job_id = None return None __doc_job_id = ( \"When file upload or", "job status. status = job_result.get(\"status\") finally: return status # Should not get here.", "once when the image is obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id", "------- results : dictionary The results of the web request if it did", "if self.submission_id is None: raise error.WebRequestError( \"There cannot be a job id without", "results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str", "this submission. \"\"\" # If the job ID already has been obtained, then", "sending a webrequest to the astrometry.net API service. Returns the results as well.", "follow it to obtain the desired service URL. Parameters ---------- service : str", "__submission_id = None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self)", "header file and then computes World Coordinate System solution from it. Because astrometry.net", "data table files which correspond to the job id. Parameters ---------- filename :", "override the defaults. Returns ------- args : dict The arguments which can be", "directory. 
delete_after : bool, default = True Delete the file after downloading it", "= property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate the", "The point of this class is to be simple enough to be understood", "The filename that the downloaded correlation file will be downloaded as. The path", "file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check that the file", "of reference stars and their pixel locations. It is obtained from the fits", "Use the API key to log in a derive a session key. self.session", "files\" \" are not ready.\" ) # Download the file. library.http.download_file_from_url( url=file_download_url, filename=filename,", "= str() self._image_return_results = {} return None def __login(self, apikey: str) -> str:", "All done. return results def get_job_status(self, job_id: str = None) -> str: \"\"\"Get", "obtain the desired service URL. Parameters ---------- service : str The service which", "file using Astropy. Parameters ---------- job_id : string, default = None The ID", "= self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) #", "urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot be properly processed. This is", "downloaded is not a valid type which can\" \" be downloaded, it must", "= None else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def", "a job sent to the API service. Parameters ---------- job_id : str, default", "if job_id is not None else self.job_id # Download the correlation file to", "The results of the astrometry.net job. 
They are, in general: (If the job", ") + \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" + \"--\" + boundary", "---------- filename : str The filename of the file when it is downloaded", "this API connection to astrometry.net \"\"\" # The default arguments for uploading files.", "# Detailed is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [", "- Calibration : Calibration of the image uploaded. - Tags : Known tagged", "None ) -> None: \"\"\"Downloads fits data table files which correspond to the", "for should be generated from. Returns ------- url : str The URL for", "provided image. - `corr`: A table of the correspondences between reference stars location", "detected in the provided image. - `corr`: A table of the correspondences between", "in the image, people inputted. - Machine Tags : Ditto for tags, but", "\"\"\"Get the status of a job specified by its ID. Parameters ---------- job_id", "their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are", "kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None: args.update({keydex: defaultdex})", "which we can pull # from the API service. Accommodating for capitalization. file_type", "IDs will change. # They must be reset because of their read-only nature.", "os import urllib.parse import urllib.request import urllib.error import random import astropy.wcs as ap_wcs", "guess # specified byt he centers, and its maximal deviation as specified #", "objects in the field, with annotations. - Info : A collection of most", "get_job_results(self, job_id: str = None) -> dict: \"\"\"Get the results of a job", "file that is downloaded into a temporary directory. Parameters ---------- job_id : string,", ": string The service which is being requested. 
The web URL is constructed", ") text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the status of", "client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of this class is to be simple", "the job. service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string) except", "the # correct format. Namely, a multipart/form-data format. if file_args is not None:", "Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) status = None try:", "\"bad apikey\": raise error.WebRequestError( \"The API key provided is not a valid key.\"", "when it is downloaded and saved to disk. file_type : str The type", "id. This allows for easier # association between this class instance and the", "(\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError( \"The", "Calibration of the image uploaded. - Tags : Known tagged objects in the", "is\" \" saved here.\" ) __submission_id = None submission_id = property( __get_submission_id, __set_submission_id,", "__job_id = None job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str)", "When uploading a new file, the submission and job IDs will change. #", "and their pixel locations. It is obtained from the fits corr file that", "the correlation file to read into a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename", "library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation", "\"\"\"Remove the current job ID association.\"\"\" self.__job_id = None return None __doc_job_id =", "Get the result of the job. 
service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string)", "submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract", "Returns ------- session_key : string The session key for this login session. \"\"\"", "if it did not fail. \"\"\" # Obtain the session key derived when", "the data from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) #", "is None: raise error.WebRequestError(\"There is no job to download the file from.\") if", "from. This should be used if the API is a self-hosted install or", "it is not passed, the ID determined by the file upload is used.", "API key used to log in. original_upload_filename : string The original filename that", "should be used if the API is a self-hosted install or has a", "self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For", "if desired. if delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str =", "to send the request. \"\"\" args = {} for keydex, defaultdex, typedex in", "annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string =", "For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info.", "\"\"\"Get the results of a job sent to the API service. Parameters ----------", "wcs header file and then computes World Coordinate System solution from it. 
Because", "- `axy`: A table in of the location of stars detected in the", "{message}\".format( message=error_message ) ) else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web", "associated with this submission. \"\"\" # If the job ID already has been", "debugging or similar processes. Returns ------- None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL", "uploading files. In (key, value, type) form. # Detailed is also their useage", "------- session_key : string The session key for this login session. \"\"\" #", ": bool, default = True Should there be printed messages as the processes", "(\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None, list), (\"y\", None, list), (\"album\",", "opihiexarata.library as library import opihiexarata.library.error as error import opihiexarata.library.hint as hint # The", "in the sky and in pixel space. job_id : str, default = None", ": string, default = None The base url which all other API URL", "for that\" \" job to operate on.\" ) else: # What happened is", "str The service which the API URL for should be generated from. Returns", "(\"image_height\", None, int), (\"x\", None, list), (\"y\", None, list), (\"album\", None, str), ]", "fine. session_key = session return session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission", "upload results. It may be the case that there is not job yet", "the file after downloading it to extract its information. Returns ------- correlation_table :", "apikey : string The API key of the user. silent : bool, default", ": string The status of the submission. 
If the job has not run", "None: \"\"\"Assign the submission ID, it should only be done once when the", "= library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename is not None", "\"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" + \"--\" + boundary + \"--\\n\"", "raise error.ReadOnlyError( \"The submission ID has already been set by obtaining it from", "# The session should be fine. session_key = session return session_key def __get_submission_id(self)", "status = job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError( \"The job result", "-> str: \"\"\"Construct the file curl from the file type `ftype` and the", "else: # What happened is unknown. raise error.UndiscoveredError(\"Why the web request failed is", "image is obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id else: raise error.ReadOnlyError(", "is used. Returns ------- result : dict The result of the submission. \"\"\"", "sending a request. This constructs the needed arguments, replacing the defaults with user", "None else self.job_id # Download the correlation file to read into a data", "is not None else upload_filename + \"_wcs\" ) # The full path of", "API using the API key. Parameters ---------- url : string, default = None", "---------- **kwargs : dict Arguments which would override the defaults. Returns ------- args", "information. - `rdls`: A table of reference stars nearby. - `axy`: A table", "self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not None else _DEFAULT_BASE_URL ) #", "file upload is used. temp_filename : string, default = None The filename that", "given is valid. if not session: raise error.WebRequestError( \"The provided API key did", "to allow for the uploading of files or images to the API. This", "to extract its information. Returns ------- correlation_table : Table The table which details", "session works and that the API key given is valid. 
if not session:", "the status of the request provided is a valid status. status = result.get(\"status\")", "between reference stars location in the sky and in pixel space. job_id :", "correlates the location of reference stars and their pixel locations. It is obtained", "image and saves it. Parameters ---------- pathname : str The pathname of the", "key. Parameters ---------- url : string, default = None The base url which", "# by the radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float),", "fits_table_filename = ( temp_filename if temp_filename is not None else upload_filename + \"_wcs\"", "dictionary, default = None If a file is being uploaded instead, special care", "For the status. results[\"status\"] = status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id)", "field. service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string =", "going to still be in the temporary directory. delete_after : bool, default =", "= property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the", "API requires that the data format must be a JSON based datatype. json_data", "request.\" ) # The logic should not flow beyond this point. raise error.LogicFlowError", "= data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise, the form should be", "parameters allows for the establishment of an initial guess # specified byt he", "Save the file information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process the", "self._apikey = apikey self.session = session_key # Placeholder variables. 
self.original_upload_filename = str() self._image_return_results", "os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs) -> dict: \"\"\"A wrapper to", "obtains the wcs header file and then computes World Coordinate System solution from", "\"--\" + boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else:", "hint.WCS: \"\"\"This obtains the wcs header file and then computes World Coordinate System", "# Image scaling parameters, if provided, when known, helps the # processing a", "arguments, replacing the defaults with user provided arguments where desired. Parameters ---------- **kwargs", "info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return results def", "the file, check that the file actually exists. if job_id is None: raise", "If it is not passed, the ID determined by the file upload is", "delete_after: bool = True ) -> hint.WCS: \"\"\"This obtains the wcs header file", "self.session}) # The API requires that the data format must be a JSON", "# If there are no jobs, then it is likely still in queue.", "of an initial guess # specified byt he centers, and its maximal deviation", "URL for the request. It is a little different from the # normal", "str: \"\"\"Get the status of a submission specified by its ID. Parameters ----------", "If a file is being uploaded instead, special care must be taken to", "error.WebRequestError( \"The web request output cannot be properly processed. This is likely\" \"", "are not ready.\" ) # Download the file. library.http.download_file_from_url( url=file_download_url, filename=filename, overwrite=True )", "where desired. 
Parameters ---------- **kwargs : dict Arguments which would override the defaults.", "float), (\"center_dec\", None, float), (\"radius\", None, float), # Image properties, preprocessing it a", "in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary )", "= job_id if job_id is not None else self.job_id # Download the correlation", "# They must be reset because of their read-only nature. del self.submission_id, self.job_id", "there is no reason to # call the API again. if self.__job_id is", "for the API which all other service URLs are derived from. _DEFAULT_BASE_URL =", "submission specified by its ID. Parameters ---------- submission_id : str, default = None", "= self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str =", "used if the API is a self-hosted install or has a different web", "it for us, we just extract it from the header file using Astropy.", "file_args = {\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does not exist:", "from the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return", "extension=1 ) # Delete the temporary file after loading it if desired. if", "little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float),", "specified by its ID. Parameters ---------- submission_id : str, default = None The", "url which all other API URL links are derived from. This should be", "def __del_submission_id(self) -> None: \"\"\"Remove the current submission ID association.\"\"\" self.__submission_id = None", "the uploaded file. 
upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results", "# Image properties, preprocessing it a little can help in its # determination.", "key.\" ) else: raise error.WebRequestError( \"The server returned an error status message: \\n", ") } data_pre = str( \"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\"", "= upload_results return upload_results def download_result_file( self, filename: str, file_type: str, job_id: str", "upload is used. Returns ------- result : dict The result of the submission.", "in the provided image. - `corr`: A table of the correspondences between reference", "of their read-only nature. del self.submission_id, self.job_id # Save the file information. self.original_upload_filename", "self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID, it should only", "all other API URL links are derived from. This should be used if", "which would override the defaults. Returns ------- args : dict The arguments which", "try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because the", "the results as well. Parameters ---------- service : string The service which is", "-> str: \"\"\"Extract the submission ID from the image upload results.\"\"\" image_results =", "astropy.wcs as ap_wcs import opihiexarata.library as library import opihiexarata.library.error as error import opihiexarata.library.hint", "submission_id is not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string)", "Processing the request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read()", "stars and their pixel locations. 
\"\"\" job_id = job_id if job_id is not", "file_type=\"wcs\", job_id=job_id ) # Load the header from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname)", "it. Because astrometry.net computes it for us, we just extract it from the", "the correct URL for the desired service. Because astrometry.net uses a convension, we", "pixel locations. It is obtained from the fits corr file that is downloaded", "the downloaded correlation file will be downloaded as. The path is going to", "as ap_wcs import opihiexarata.library as library import opihiexarata.library.error as error import opihiexarata.library.hint as", "the submission. If the job has not run yet, None is returned instead.", "delete_after: os.remove(corr_pathname) return correlation_table def get_wcs( self, job_id: str = None, temp_filename: str", "provided, the ID determined by the file upload is used. temp_filename : string,", "def __login(self, apikey: str) -> str: \"\"\"The method to log into the API", "the user. silent : bool, default = True Should there be printed messages", "key. self.session = None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session = session_key", "boundary + \"--\\n\" data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise,", "normal API scheme so a new method is made. def _construct_file_download_url(ftype: str, id:", "None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\", None,", "generated from. Returns ------- url : str The URL for the service. 
\"\"\"", "saved here.\" ) __submission_id = None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id,", "done once when the image is obtained.\"\"\" if self.__job_id is None: self.__job_id =", "opihiexarata.library.hint as hint # The base URL for the API which all other", "status message: \\n {message}\".format( message=error_message ) ) else: return result except urllib.error.HTTPError: raise", "self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign", "Parameters ---------- filename : str The filename of the file when it is", "= None return None __doc_job_id = ( \"When file upload or table upload", "defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if keydex in kwargs: new_value = kwargs.pop(keydex) new_value =", "new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif defaultdex is not None:", "This error is likely because the job is still in queue. status =", "data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise, the form should be standard", "Defining the URL. self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not None else", "name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" + \"--\" + boundary + \"\\n\"", "different web source than nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey :", "else: job_id_list = submission_results.get(\"jobs\", []) # If there are no jobs, then it", "random import astropy.wcs as ap_wcs import opihiexarata.library as library import opihiexarata.library.error as error", "desired. if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs) -> dict:", "the web request. file_args : dictionary, default = None If a file is", "submission id. 
This allows for easier # association between this class instance and", "web request output cannot be properly processed. This is likely\" \" from a", "should be generated from. Returns ------- url : str The URL for the", "submission_id: str = None) -> str: \"\"\"Get the status of a submission specified", "status def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str = None, delete_after:", "These parameters allows for the establishment of an initial guess # specified byt", "return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for sending a", ") else: # What happened is unknown. raise error.UndiscoveredError(\"Why the web request failed", "None else self.job_id # Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id)", "download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file download", "WCS The world coordinate solution class for the image provided. \"\"\" job_id =", "not None else upload_filename + \"_wcs\" ) # The full path of the", "downloaded from astrometry.net. It should one of the following: - `wcs`: The world", "def get_wcs( self, job_id: str = None, temp_filename: str = None, delete_after: bool", "corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\",", "\"_wcs\" ) # The full path of the filename derived from saving it", "= status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) #", "thus the data files\" \" are not ready.\" ) # Download the file.", "the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url", "this point. 
raise error.LogicFlowError return None def get_job_results(self, job_id: str = None) ->", "= self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: # Make a more helpful error message", "uploading a new file, the submission and job IDs will change. # They", "into a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename", "processing a little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\",", "_generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for sending a request. This constructs", "job ID association.\"\"\" self.__job_id = None return None __doc_job_id = ( \"When file", "Coordinate System solution from it. Because astrometry.net computes it for us, we just", "a multipart/form-data format. if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __", "str The filename of the file when it is downloaded and saved to", "the temporary file after loading it if desired. if delete_after: os.remove(corr_pathname) return wcs", "get here. raise error.LogicFlowError return None def get_submission_results(self, submission_id: str = None) ->", "def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str = None, delete_after: bool", "the data format must be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args) #", "False) if status != \"success\": raise error.WebRequestError( \"The job result request failed, check", "files or images to the API. 
This also determines the submission ID and", "= open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except", "corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save", "the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return results", "_id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file,", "most everything above. \"\"\" job_id = job_id if job_id is not None else", "with annotations. - Info : A collection of most everything above. \"\"\" job_id", "by the file upload is used. temp_filename : string, default = None The", ") fits_table_filename = ( temp_filename if temp_filename is not None else upload_filename +", "file upload is used. Returns ------- results : dict The results of the", "tagged objects in the image, people inputted. - Machine Tags : Ditto for", "file type `ftype` and the job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id", ": Calibration of the image uploaded. - Tags : Known tagged objects in", "Returns ------- results : dict The results of the astrometry.net job. They are,", "file upload or table upload is sent to the API, the submission ID", "urllib.request import urllib.error import random import astropy.wcs as ap_wcs import opihiexarata.library as library", "+ \"_wcs\" ) # The full path of the filename derived from saving", "format. if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)])", "data. 
session : string The session ID of this API connection to astrometry.net", "data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the request. request =", "self.submission_id, self.job_id # Save the file information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs)", "url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check that the", "error_message == \"bad apikey\": raise error.WebRequestError( \"The API key provided is not a", "is used. Returns ------- results : dict The results of the astrometry.net job.", "no reason to # call the API again. if self.__job_id is not None:", "that there is not job yet associated with this submission. \"\"\" # If", "def download_result_file( self, filename: str, file_type: str, job_id: str = None ) ->", "is not None: args.update({\"session\": self.session}) # The API requires that the data format", "defaults. Returns ------- args : dict The arguments which can be used to", "get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get the results of a submission", "(\"y\", None, list), (\"album\", None, str), ] def __init__(self, url=None, apikey: str =", "that a file be send, then it must be in the # correct", "the submission ID is\" \" saved here.\" ) __submission_id = None submission_id =", "raise error.WebRequestError( \"The server returned an error status message: \\n {message}\".format( message=error_message )", "is downloaded into a temporary directory. Parameters ---------- job_id : string, default =", "for this login session. \"\"\" # The key. 
args = {\"apikey\": apikey} result", "is unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If there are no jobs,", "if temp_filename is not None else upload_filename + \"_corr\" ) # The full", "obtained.\"\"\" if self.__submission_id is None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission", "float), (\"scale_est\", None, float), (\"scale_err\", None, float), # These parameters allows for the", "# Check if the status of the request provided is a valid status.", "by the general public. (\"publicly_visible\", \"y\", str), # Image scaling parameters, if provided,", "file will be downloaded as. The path is going to still be in", "the sky and in pixel space. job_id : str, default = None The", "try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\":", "only be done once when the image is obtained.\"\"\" if self.__job_id is None:", "being uploaded instead, special care must be taken to sure it matches the", "+ 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post", "image provided. \"\"\" job_id = job_id if job_id is not None else self.job_id", "is made. def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the file curl", "------- result : dict The result of the submission. \"\"\" submission_id = (", "\"\"\"A wrapper function for sending a webrequest to the astrometry.net API service. Returns", "ID. Parameters ---------- submission_id : str The ID of the submission. If it", "used as well. Returns ------- results : dict The results of the API", "job_id=job_id ) # Load the header from the file. 
wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs", "+ boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data;", "# For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the", "= {} The arguments being sent over the web request. file_args : dictionary,", "http status code.\" \" It is likely that the job is still processing", "float), (\"scale_err\", None, float), # These parameters allows for the establishment of an", "when known, helps the # processing a little. (\"scale_units\", None, str), (\"scale_type\", None,", "\"\"\"The method to log into the API system. Parameters ---------- apikey : string", "requests. if self.session is not None: args.update({\"session\": self.session}) # The API requires that", "by the file upload is used. Returns ------- status : string The status", "and job IDs will change. # They must be reset because of their", "# desired. api_url = self._generate_service_url(service=service) # If the request requires that a file", "status of the request provided is a valid status. status = result.get(\"status\") if", "Returns ------- url : str The URL for the service. \"\"\" url =", "matches the upload specifications. Returns ------- results : dictionary The results of the", "# Save the correlation file. 
self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id ) # Load the", "is sent to the API, the submission ID is\" \" saved here.\" )", "= ( temp_filename if temp_filename is not None else upload_filename + \"_wcs\" )", "file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not", "timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT ) text = file.read() result = library.json.json_to_dictionary(json_string=text) # Check if the status", "def __set_job_id(self, job_id) -> None: \"\"\"Assign the job ID, it should only be", "the job. - Calibration : Calibration of the image uploaded. - Tags :", "the needed arguments, replacing the defaults with user provided arguments where desired. Parameters", "= library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\" ) corr_pathname = library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the", "url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) #", "boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\": 'multipart/form-data; boundary=\"{bd}\"'.format( bd=boundary ) } data_pre", "None else _DEFAULT_BASE_URL ) # Use the API key to log in a", "Check the job status. status = job_result.get(\"status\") finally: return status # Should not", "Placeholder variables. self.original_upload_filename = str() self._image_return_results = {} return None def __login(self, apikey:", "are for licensing and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), #", "file_type: str, job_id: str = None ) -> None: \"\"\"Downloads fits data table", "The web URL is constructed from this string. 
args : dictionary, default =", "correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary file after loading", "the web API using the API key. Parameters ---------- url : string, default", "The default arguments for uploading files. In (key, value, type) form. # Detailed", "desired. Parameters ---------- **kwargs : dict Arguments which would override the defaults. Returns", "deduce what the error is. if error_message == \"bad apikey\": raise error.WebRequestError( \"The", "file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary =", "else self.job_id # Ensure that the type provided is a valid type which", "be obtained from. If not provided, the ID determined by the file upload", "self.__job_id is not None: return self.__job_id # Call the API to get the", "string The API key of the user. silent : bool, default = True", "return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot be properly", "Download the correlation file to read into a data table. upload_filename = library.path.get_filename_without_extension(", "of the web request if it did not fail. \"\"\" # Obtain the", "raise error.LogicFlowError return None def get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get", "deviation as specified # by the radius parameter. (In degrees.) (\"center_ra\", None, float),", "stars nearby. - `axy`: A table in of the location of stars detected", "None is returned instead. \"\"\" job_id = job_id if job_id is not None", "\"\\r\\n\" ) data_post = \"\\n\" + \"--\" + boundary + \"--\\n\" data =", "System solution from it. 
Because astrometry.net computes it for us, we just extract", "only be done once when the image is obtained.\"\"\" if self.__submission_id is None:", "not None else _DEFAULT_BASE_URL ) # Use the API key to log in", "string, default = None The ID of the job that the results should", "float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool), # These parameters", "default = {} The arguments being sent over the web request. file_args :", "For the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the", "is going to still be in the temporary directory. delete_after : bool, default", "is a valid type which we can pull # from the API service.", "\"d\", str), (\"allow_modifications\", \"d\", str), # For visibility by the general public. (\"publicly_visible\",", "URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around", "set by obtaining it from the API\" \" service.\" ) return None def", "the ID determined by the file upload is used. Returns ------- results :", "is being requested. The web URL is constructed from this string. args :", "---------- _apikey : string The API key used to log in. original_upload_filename :", "library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file after loading it if", "of the job. service_string = \"jobs/{id}\".format(id=job_id) try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: #", "None, str), ] def __init__(self, url=None, apikey: str = None, silent: bool =", "from the header file using Astropy. 
Parameters ---------- job_id : string, default =", "(\"x\", None, list), (\"y\", None, list), (\"album\", None, str), ] def __init__(self, url=None,", "= ( temp_filename if temp_filename is not None else upload_filename + \"_corr\" )", "job_id_list = submission_results.get(\"jobs\", []) # If there are no jobs, then it is", "the request. request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. try: file", "library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id )", "that the downloaded wcs file will be downloaded as. The path is going", "new file, the submission and job IDs will change. # They must be", "= (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types: raise error.WebRequestError(", "stars detected in the provided image. - `corr`: A table of the correspondences", "have the full functionality of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The", "json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to send this request to, constructed", "apikey: str = None, silent: bool = True) -> None: \"\"\"The instantiation, connecting", "not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers", "Should not get here. raise error.LogicFlowError return None def get_submission_results(self, submission_id: str =", "in field : Known objects in the image field. - Annotations : Known", "\"--\" + boundary + \"\\n\" + \"Content-Type: text/plain\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition:", "data = data.encode(\"utf-8\") # Finally send the request. 
request = urllib.request.Request(url=api_url, headers=headers, data=data)", "open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except IOError:", "bad web request.\" ) # The logic should not flow beyond this point.", "a session key. self.session = None session_key = self.__login(apikey=apikey) self._apikey = apikey self.session", "is extracted and used as well. Returns ------- results : dict The results", "OpihiExarata. Attributes ---------- _apikey : string The API key used to log in.", "{} The arguments being sent over the web request. file_args : dictionary, default", "= None The filename that the downloaded wcs file will be downloaded as.", "+ file_args[\"data\"] + data_post.encode() else: # Otherwise, the form should be standard encoded:", "Extract the submission id. This allows for easier # association between this class", "API again. if self.__job_id is not None: return self.__job_id # Call the API", "status of a job specified by its ID. Parameters ---------- job_id : str,", "None, list), (\"y\", None, list), (\"album\", None, str), ] def __init__(self, url=None, apikey:", "default = None The filename that the downloaded correlation file will be downloaded", "of # source star positions. (\"image_width\", None, int), (\"image_height\", None, int), (\"x\", None,", "is correct or try\" \" again later.\" ) else: results = {} #", "file_args) self._image_return_results = upload_results return upload_results def download_result_file( self, filename: str, file_type: str,", "be send, then it must be in the # correct format. Namely, a", "\"The submission ID has already been set by obtaining it from the API\"", "filename that the downloaded correlation file will be downloaded as. The path is", "result of the submission. \"\"\" submission_id = ( submission_id if submission_id is not", "coordinates of the stars and their pixel locations. 
\"\"\" job_id = job_id if", "did not provide a valid session.\" ) else: # The session should be", "Ensure that the type provided is a valid type which we can pull", "error.WebRequestError(\"There is no job to download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200:", "other service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based", "result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce", "also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters", "still processing and thus the data files\" \" are not ready.\" ) #", "file_type=\"corr\", job_id=job_id ) # Load the data from the file. __, correlation_table =", "between this class instance and the uploaded file. upload_results = self._send_web_request(\"upload\", args, file_args)", "fail. \"\"\" # Obtain the session key derived when this class is instantiated", "application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\"", "correlation file will be downloaded as. The path is going to still be", "we just extract it from the header file using Astropy. Parameters ---------- job_id", "information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process the file upload. file_args", "import urllib.parse import urllib.request import urllib.error import random import astropy.wcs as ap_wcs import", "Returns ------- correlation_table : Table The table which details the correlation between the", "\"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in field. 
service_string = \"jobs/{id}/objects_in_field\".format(id=job_id)", "error.ReadOnlyError( \"The job ID has already been set by obtaining it from the", "obtains the table that correlates the location of reference stars and their pixel", "has been obtained, then there is no reason to # call the API", "not None: args.update({keydex: defaultdex}) return args def _send_web_request( self, service: str, args: dict", "for uploading files. In (key, value, type) form. # Detailed is also their", "from saving it in a temporary # directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename, extension=\"fits\"", "is to be simple enough to be understood by others and be specialized", "call to upload the image. \"\"\" # When uploading a new file, the", "---------- submission_id : str The ID of the submission. If it is not", "session_key = session return session_key def __get_submission_id(self) -> str: \"\"\"Extract the submission ID", "from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper around the web API", ": Ditto for tags, but only via machine inputs. - Objects in field", "it is likely still in queue. if len(job_id_list) == 0: self.__job_id = None", "ap_wcs import opihiexarata.library as library import opihiexarata.library.error as error import opihiexarata.library.hint as hint", "be used if the API is a self-hosted install or has a different", "specified # by the radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None,", "The session key for this login session. \"\"\" # The key. args =", ") # The logic should not flow beyond this point. raise error.LogicFlowError return", "None) -> str: \"\"\"Get the status of a submission specified by its ID.", "the API key given is valid. if not session: raise error.WebRequestError( \"The provided", "the temporary file after loading it if desired. 
if delete_after: os.remove(corr_pathname) return correlation_table", "str, file_type: str, job_id: str = None ) -> None: \"\"\"Downloads fits data", "the request requires that a file be send, then it must be in", "str) -> str: \"\"\"The method to log into the API system. Parameters ----------", "[]) # If there are no jobs, then it is likely still in", "Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load the header", "the provided image. - `corr`: A table of the correspondences between reference stars", "reference stars nearby. - `axy`: A table in of the location of stars", "and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For visibility by", "job has not finished yet, None is returned.) - Status : The status", "= result.get(\"status\") if status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to", "self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status def get_reference_star_pixel_correlation( self,", "\"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition:", "encoded: x-www-form-encoded headers = {} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data", "hint.Table: \"\"\"This obtains the table that correlates the location of reference stars and", "---------- service : string The service which is being requested. The web URL", "establishment of an initial guess # specified byt he centers, and its maximal", "into the API system. Parameters ---------- apikey : string The API key for", "likely\" \" from a bad web request.\" ) # The logic should not", "web request.\" ) # The logic should not flow beyond this point. raise", "in pixel space. 
job_id : str, default = None The ID of the", "urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. try: file = urllib.request.urlopen( request, timeout=library.config.ASTROMETRYNET_WEBAPI_JOB_QUEUE_TIMEOUT", "the request. \"\"\" args = {} for keydex, defaultdex, typedex in self._DEFAULT_URL_ARGUMENTS: if", "solution from it. Because astrometry.net computes it for us, we just extract it", "tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags. service_string", "-> dict: \"\"\"Get the results of a job sent to the API service.", ": Known objects in the image field. - Annotations : Known objects in", "downloading it to extract its information. Returns ------- correlation_table : Table The table", "been set by obtaining it from the API service.\" ) return None def", "to the job id. Parameters ---------- filename : str The filename of the", "variables. self.original_upload_filename = str() self._image_return_results = {} return None def __login(self, apikey: str)", "Tags : Known tagged objects in the image, people inputted. - Machine Tags", "request to, constructed from the service # desired. api_url = self._generate_service_url(service=service) # If", "silent: bool = True) -> None: \"\"\"The instantiation, connecting to the web API", "for debugging or similar processes. Returns ------- None \"\"\" # Defining the URL.", "still in queue. return None # Check that the service was successful. status", "return None __doc_submission_id = ( \"When file upload or table upload is sent", "service. Returns ------- session_key : string The session key for this login session.", "API system. 
Parameters ---------- apikey : string The API key for the web", "\"When file upload or table upload is sent to the API, the job", "results.get(\"status\") return status def get_reference_star_pixel_correlation( self, job_id: str = None, temp_filename: str =", "allows for the establishment of an initial guess # specified byt he centers,", "fits_table_filename = ( temp_filename if temp_filename is not None else upload_filename + \"_corr\"", "(key, value, type) form. # Detailed is also their useage cases per #", "\\n {message}\".format( message=error_message ) ) else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The", "proper job ID. job_id = job_id if job_id is not None else self.job_id", "on. if self.submission_id is None: raise error.WebRequestError( \"There cannot be a job id", "# Save the file information. self.original_upload_filename = pathname args = self._generate_upload_args(**kwargs) # Process", "pixel space. job_id : str, default = None The ID of the job", "the result of the job. service_string = \"jobs/{id}\".format(id=job_id) status = None try: job_result", "def get_job_status(self, job_id: str = None) -> str: \"\"\"Get the status of a", "if job_id is not None else self.job_id # Get the result of the", "the submission ID from the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id =", "# Get the result of the job. service_string = \"jobs/{id}\".format(id=job_id) status = None", "The ID of the submission. If it is not passed, the ID determined", "the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id = image_results.get(\"subid\", None) return self.__submission_id", "string The API key for the web API service. Returns ------- session_key :", "get_wcs( self, job_id: str = None, temp_filename: str = None, delete_after: bool =", "the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. 
The point of this class is", "(\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\", None, float), (\"scale_err\",", "data from the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete", "helpful error message for what is going on. if self.submission_id is None: raise", "self.original_upload_filename = str() self._image_return_results = {} return None def __login(self, apikey: str) ->", "upload is used. Returns ------- results : dict The results of the astrometry.net", "fits corr file that is downloaded into a temporary directory. Parameters ---------- job_id", "not None else self.job_id # Ensure that the type provided is a valid", "get_job_status(self, job_id: str = None) -> str: \"\"\"Get the status of a job", "the submission. \"\"\" submission_id = ( submission_id if submission_id is not None else", "here.\" ) __submission_id = None submission_id = property( __get_submission_id, __set_submission_id, __del_submission_id, __doc_submission_id, )", "for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if", "parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\", None, float), #", "image_results.get(\"subid\", None) return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID,", "+ \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] )", "default = True Delete the file after downloading it to extract its information.", "of a job specified by its ID. Parameters ---------- job_id : str, default", "the ID determined by the file upload is used. 
Returns ------- None \"\"\"", "bool = True ) -> hint.Table: \"\"\"This obtains the table that correlates the", ": string The session key for this login session. \"\"\" # The key.", "a job id without there being a submission for that\" \" job to", "is not None: boundary_key = \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key)", "job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This error is likely because the job", "(\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\",", "a little different from the # normal API scheme so a new method", "= library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"corr\", job_id=job_id", "a more helpful error message for what is going on. if self.submission_id is", "has a different web source than nova.astrometry.net. Defaults to the nova.astrometry.net api service.", "calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. service_string =", "# These parameters are for licensing and distribution terms. (\"allow_commercial_use\", \"d\", str), (\"allow_modifications\",", "astrometry.net API service. Returns the results as well. Parameters ---------- service : string", "if job_id is not None else self.job_id # Ensure that the type provided", "# processing a little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float),", "service return url def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for sending", "dictionary, default = {} The arguments being sent over the web request. file_args", "it from the header file using Astropy. 
Parameters ---------- job_id : string, default", "the # normal API scheme so a new method is made. def _construct_file_download_url(ftype:", "key given is valid. if not session: raise error.WebRequestError( \"The provided API key", "form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" + \"--\" + boundary +", "objects in the image field. - Annotations : Known objects in the field,", "read-only nature. del self.submission_id, self.job_id # Save the file information. self.original_upload_filename = pathname", "job_id: str = None, temp_filename: str = None, delete_after: bool = True )", "pixel locations. \"\"\" job_id = job_id if job_id is not None else self.job_id", "__set_submission_id, __del_submission_id, __doc_submission_id, ) def __get_job_id(self) -> str: \"\"\"Extract the job ID from", "and # logged into. Use this session key for requests. if self.session is", "to be simple enough to be understood by others and be specialized for", "should be fine. session_key = session return session_key def __get_submission_id(self) -> str: \"\"\"Extract", "submission_id=self.submission_id ) except error.WebRequestError: # Make a more helpful error message for what", "it to obtain the desired service URL. Parameters ---------- service : str The", "The base URL for the API which all other service URLs are derived", "None is returned.) - Status : The status of the job. - Calibration", "The URL for the service. \"\"\" url = self.ASTROMETRY_BASE_API_URL + service return url", "replacing the defaults with user provided arguments where desired. Parameters ---------- **kwargs :", "beyond this point. raise error.LogicFlowError return None def get_job_results(self, job_id: str = None)", "= [ # These parameters are for licensing and distribution terms. (\"allow_commercial_use\", \"d\",", "of reference stars nearby. 
- `axy`: A table in of the location of", "be the case that there is not job yet associated with this submission.", "if keydex in kwargs: new_value = kwargs.pop(keydex) new_value = typedex(new_value) args.update({keydex: new_value}) elif", "library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does not", "self._send_web_request(service=service_string) # All done. return results def get_job_status(self, job_id: str = None) ->", "not None else self.job_id # Download the correlation file to read into a", "its ID. Parameters ---------- submission_id : str The ID of the submission. If", "The original filename that was used to upload the data. session : string", "format. Namely, a multipart/form-data format. if file_args is not None: boundary_key = \"\".join([random.choice(\"0123456789\")", "Status : The status of the job. - Calibration : Calibration of the", "It is likely that the job is still processing and thus the data", "table in of the location of stars detected in the provided image. -", "job_id is None: raise error.WebRequestError(\"There is no job to download the file from.\")", "# Load the header from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header)", "else: return result except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot be", "file download link is not giving an acceptable http status code.\" \" It", "request failed, check that the job ID is correct or try\" \" again", "downloaded as. The path is going to still be in the temporary directory.", "into a temporary directory. 
Parameters ---------- job_id : string, default = None The", "int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool),", "filename, \"data\": file.read()} except IOError: raise error.FileError(\"File does not exist: {path}\".format(path=pathname)) # Extract", "simple enough to be understood by others and be specialized for OpihiExarata. Attributes", "convension, we can follow it to obtain the desired service URL. Parameters ----------", "specialized for OpihiExarata. Attributes ---------- _apikey : string The API key used to", "If the job has not run yet, None is returned instead. \"\"\" job_id", "following: - `wcs`: The world corrdinate data table file. - `new_fits`, `new_image`: A", "machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For the objects in", "file. upload_results = self._send_web_request(\"upload\", args, file_args) self._image_return_results = upload_results return upload_results def download_result_file(", "from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary", "] def __init__(self, url=None, apikey: str = None, silent: bool = True) ->", "the # processing a little. (\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None,", "The results of the web request if it did not fail. \"\"\" #", "general: (If the job has not finished yet, None is returned.) - Status", "obtained.\"\"\" if self.__job_id is None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The job", "property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate the correct", "submission. \"\"\" # If the job ID already has been obtained, then there", ": Known objects in the field, with annotations. 
- Info : A collection", "the job ID, it should only be done once when the image is", "\"\"\" # Obtain the session key derived when this class is instantiated and", "web request. file_args : dictionary, default = None If a file is being", "be simple enough to be understood by others and be specialized for OpihiExarata.", "or try\" \" again later.\" ) else: results = {} # For the", "location of stars detected in the provided image. - `corr`: A table of", "uploading of files or images to the API. This also determines the submission", "was used to upload the data. session : string The session ID of", "results of the astrometry.net job. They are, in general: (If the job has", ") -> hint.WCS: \"\"\"This obtains the wcs header file and then computes World", "service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\",", "computes World Coordinate System solution from it. Because astrometry.net computes it for us,", "of the API call to upload the image. \"\"\" # When uploading a", "in valid_api_file_types: raise error.WebRequestError( \"The provided file type to be downloaded is not", "type which can\" \" be downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types", ": Table The table which details the correlation between the coordinates of the", "status of the submission. \"\"\" submission_id = ( submission_id if submission_id is not", "full path of the filename derived from saving it in a temporary #", "or images to the API. This also determines the submission ID and the", "file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename, \"data\": file.read()}", "import urllib.request import urllib.error import random import astropy.wcs as ap_wcs import opihiexarata.library as", ": string The original filename that was used to upload the data. session", "the file actually exists. 
if job_id is None: raise error.WebRequestError(\"There is no job", "requested. The web URL is constructed from this string. args : dictionary, default", "the data. session : string The session ID of this API connection to", "well. Parameters ---------- service : string The service which is being requested. The", "not get here. raise error.LogicFlowError return None def get_submission_results(self, submission_id: str = None)", "None else: # Check the job status. status = job_result.get(\"status\") finally: return status", "ap_wcs.WCS(wcs_header) # Delete the temporary file after loading it if desired. if delete_after:", "based datatype. json_data = library.json.dictionary_to_json(dictionary=args) # The URL which to send this request", "as error import opihiexarata.library.hint as hint # The base URL for the API", "to the API service. Parameters ---------- job_id : str, default = None The", "\"\"\" job_id = job_id if job_id is not None else self.job_id # Download", "an acceptable http status code.\" \" It is likely that the job is", "filename that the downloaded wcs file will be downloaded as. The path is", "the file after downloading it to extract its information. Returns ------- wcs :", "API is a self-hosted install or has a different web source than nova.astrometry.net.", "to sure it matches the upload specifications. Returns ------- results : dictionary The", "be one of: {fty}\".format( fty=valid_api_file_types ) ) # Construct the URL for the", "status code.\" \" It is likely that the job is still processing and", "its ID. Parameters ---------- submission_id : str, default = None The ID of", "url is not None else _DEFAULT_BASE_URL ) # Use the API key to", "None __doc_submission_id = ( \"When file upload or table upload is sent to", "others and be specialized for OpihiExarata. Attributes ---------- _apikey : string The API", "None, float), (\"radius\", None, float), # Image properties, preprocessing it a little can", "via machine inputs. 
- Objects in field : Known objects in the image", "temp_filename is not None else upload_filename + \"_corr\" ) # The full path", "or table upload is sent to the API, the job ID of the\"", "None) -> dict: \"\"\"Get the results of a job sent to the API", "the service was successful. status = job_result.get(\"status\", False) if status != \"success\": raise", "self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get the", "API service. Returns the results as well. Parameters ---------- service : string The", "that correlates the location of reference stars and their pixel locations. It is", ") except error.WebRequestError: # Make a more helpful error message for what is", "there is not job yet associated with this submission. \"\"\" # If the", "it if desired. if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs)", "\"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] =", "the wcs header file and then computes World Coordinate System solution from it.", "= ( submission_id if submission_id is not None else self.submission_id ) results =", "the downloaded wcs file will be downloaded as. The path is going to", "= \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str = None)", "per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are for licensing and", "len(job_id_list) == 0: self.__job_id = None else: self.__job_id = job_id_list[-1] return self.__job_id raise", "URL for should be generated from. 
Returns ------- url : str The URL", "ID already has been obtained, then there is no reason to # call", "def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID, it should only be", "service. Returns the results as well. Parameters ---------- service : string The service", "the header from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete", "which can\" \" be downloaded, it must be one of: {fty}\".format( fty=valid_api_file_types )", "Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of this class is to be", "self.session = session_key # Placeholder variables. self.original_upload_filename = str() self._image_return_results = {} return", "if the session works and that the API key given is valid. if", "# The key. args = {\"apikey\": apikey} result = self._send_web_request(service=\"login\", args=args) session =", "submission for that\" \" job to operate on.\" ) else: # What happened", "upload is used. Returns ------- status : string The status of the submission.", "\"\"\"Extract the submission ID from the image upload results.\"\"\" image_results = self._image_return_results self.__submission_id", "everything above. \"\"\" job_id = job_id if job_id is not None else self.job_id", "specified by its ID. Parameters ---------- job_id : str, default = None The", "service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For the machine tags. service_string =", "result def get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get the status of", "jobs, then it is likely still in queue. if len(job_id_list) == 0: self.__job_id", "pathname : str The pathname of the file to open. The filename is", "get the job ID. 
try: submission_results = self.get_submission_results( submission_id=self.submission_id ) except error.WebRequestError: #", "+ \"--\" + boundary + \"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" +", "its information. Returns ------- correlation_table : Table The table which details the correlation", "instead is an x,y list of # source star positions. (\"image_width\", None, int),", "job_id = job_id if job_id is not None else self.job_id # Download the", "return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID, it should", ": str The service which the API URL for should be generated from.", "determination. (\"parity\", None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int),", "the file upload is used. temp_filename : string, default = None The filename", "API, the submission ID is\" \" saved here.\" ) __submission_id = None submission_id", "files which correspond to the job id. Parameters ---------- filename : str The", "str = None, delete_after: bool = True ) -> hint.WCS: \"\"\"This obtains the", "if error_message == \"bad apikey\": raise error.WebRequestError( \"The API key provided is not", "# normal API scheme so a new method is made. def _construct_file_download_url(ftype: str,", "Info : A collection of most everything above. \"\"\" job_id = job_id if", "the current submission ID association.\"\"\" self.__submission_id = None return None __doc_submission_id = (", "likely because the job is still in queue. status = None else: #", "the file. __, correlation_table = library.fits.read_fits_table_file( filename=corr_pathname, extension=1 ) # Delete the temporary", "[ # These parameters are for licensing and distribution terms. 
(\"allow_commercial_use\", \"d\", str),", "\"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading", "= library.json.dictionary_to_json(dictionary=args) # The URL which to send this request to, constructed from", "Image properties, preprocessing it a little can help in its # determination. (\"parity\",", "the URL for the request. It is a little different from the #", "(\"tweak_order\", None, int), (\"crpix_center\", None, bool), (\"invert\", None, bool), # These parameters are", "ID has already been set by obtaining it from the API service.\" )", "its information. Returns ------- wcs : Astropy WCS The world coordinate solution class", "self, service: str, args: dict = {}, file_args: dict = None ) ->", "can be used to send the request. \"\"\" args = {} for keydex,", "else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self,", "# Placeholder variables. self.original_upload_filename = str() self._image_return_results = {} return None def __login(self,", "path of the filename derived from saving it in a temporary # directory.", "the API\" \" service.\" ) return None def __del_submission_id(self) -> None: \"\"\"Remove the", "A table of reference stars nearby. - `axy`: A table in of the", "request provided is a valid status. status = result.get(\"status\") if status == \"error\":", "None, bool), (\"invert\", None, bool), # These parameters are needed if being sent", "directory. Parameters ---------- job_id : string, default = None The ID of the", "loading it if desired. if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str,", "request = urllib.request.Request(url=api_url, headers=headers, data=data) # Processing the request. 
try: file = urllib.request.urlopen(", "------- results : dict The results of the astrometry.net job. They are, in", "------- args : dict The arguments which can be used to send the", "properties, preprocessing it a little can help in its # determination. (\"parity\", None,", "str: \"\"\"The method to log into the API system. Parameters ---------- apikey :", "Check if the status of the request provided is a valid status. status", "of the astrometry.net job. They are, in general: (If the job has not", "= \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) # For the info. service_string = \"jobs/{id}/info\".format(id=job_id) results[\"info\"]", "Returns ------- status : string The status of the submission. If the job", "wrapper to allow for the uploading of files or images to the API.", "upload_results def download_result_file( self, filename: str, file_type: str, job_id: str = None )", "processing and thus the data files\" \" are not ready.\" ) # Download", "\"The job result request failed, check that the job ID is correct or", "association between this class instance and the uploaded file. upload_results = self._send_web_request(\"upload\", args,", "job id `id`.\"\"\" url = \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url =", "a job specified by its ID. Parameters ---------- job_id : str, default =", "raise error.WebRequestError( \"The provided API key did not provide a valid session.\" )", "\"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"request-json\"\\r\\n' + \"\\r\\n\" + json_data + \"\\n\" +", "field : Known objects in the image field. - Annotations : Known objects", "done once when the image is obtained.\"\"\" if self.__submission_id is None: self.__submission_id =", "= ap_wcs.WCS(wcs_header) # Delete the temporary file after loading it if desired. 
if", "to download the file from.\") if library.http.get_http_status_code(url=file_download_url) != 200: raise error.WebRequestError( \"The file", "results as well. Parameters ---------- service : string The service which is being", "{} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally", "does not exist: {path}\".format(path=pathname)) # Extract the submission id. This allows for easier", "A table of the correspondences between reference stars location in the sky and", "job_result.get(\"status\", False) if status != \"success\": raise error.WebRequestError( \"The job result request failed,", "True Delete the file after downloading it to extract its information. Returns -------", "status # For the calibrations. service_string = \"jobs/{id}/calibration\".format(id=job_id) results[\"calibration\"] = self._send_web_request(service=service_string) # For", "not fail. \"\"\" # Obtain the session key derived when this class is", "by its ID. Parameters ---------- job_id : str, default = None The ID", "args : dict The arguments which can be used to send the request.", "is being uploaded instead, special care must be taken to sure it matches", "---------- service : str The service which the API URL for should be", "this session key for requests. if self.session is not None: args.update({\"session\": self.session}) #", "to the astrometry.net API service. Returns the results as well. Parameters ---------- service", "space. job_id : str, default = None The ID of the job that", "args def _send_web_request( self, service: str, args: dict = {}, file_args: dict =", "None, int), (\"downsample_factor\", None, int), (\"positional_error\", None, float), (\"tweak_order\", None, int), (\"crpix_center\", None,", "# Defining the URL. 
self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not None", "self._send_web_request(service=\"login\", args=args) session = result.get(\"session\", False) # Check if the session works and", "name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" +", "Parameters ---------- apikey : string The API key for the web API service.", "the submission. If it is not passed, the ID determined by the file", "args : dictionary, default = {} The arguments being sent over the web", "job ID already has been obtained, then there is no reason to #", "status == \"error\": error_message = result.get(\"errormessage\", \"(none)\") # Try to deduce what the", "upload the data. session : string The session ID of this API connection", "= None) -> str: \"\"\"Get the status of a job specified by its", "file to read into a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename", "string The session ID of this API connection to astrometry.net \"\"\" # The", "= None, delete_after: bool = True ) -> hint.WCS: \"\"\"This obtains the wcs", "The filename of the file when it is downloaded and saved to disk.", "the API service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\",", "str: \"\"\"Extract the submission ID from the image upload results.\"\"\" image_results = self._image_return_results", "by the radius parameter. (In degrees.) (\"center_ra\", None, float), (\"center_dec\", None, float), (\"radius\",", "= \"\".join([random.choice(\"0123456789\") for __ in range(19)]) boundary = \"==============={bkey}==\".format(bkey=boundary_key) headers = { \"Content-Type\":", "\"\"\"The instantiation, connecting to the web API using the API key. Parameters ----------", "session_key = self.__login(apikey=apikey) self._apikey = apikey self.session = session_key # Placeholder variables. 
self.original_upload_filename", "service URLs are derived from. _DEFAULT_BASE_URL = \"http://nova.astrometry.net/api/\" class AstrometryNetWebAPIEngine(hint.AstrometryEngine): \"\"\"A python-based wrapper", "of the submission. If the job has not run yet, None is returned", "is None: self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission ID has already", "to read into a data table. upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename ) fits_table_filename =", "computes it for us, we just extract it from the header file using", "return result def get_submission_status(self, submission_id: str = None) -> str: \"\"\"Get the status", "logic should not flow beyond this point. raise error.LogicFlowError return None def get_job_results(self,", "the submission id. This allows for easier # association between this class instance", "# The default arguments for uploading files. In (key, value, type) form. #", "# from the API service. Accommodating for capitalization. file_type = str(file_type).lower() valid_api_file_types =", "str) -> str: \"\"\"Generate the correct URL for the desired service. Because astrometry.net", "submission and job IDs will change. # They must be reset because of", "silent : bool, default = True Should there be printed messages as the", "**kwargs) -> dict: \"\"\"A wrapper to allow for the uploading of files or", "result.get(\"session\", False) # Check if the session works and that the API key", "= self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id) results[\"annotations\"] = self._send_web_request(service=service_string) #", "( \"When file upload or table upload is sent to the API, the", "inputs. - Objects in field : Known objects in the image field. -", "ID of the job that the results should be obtained from. If not", "self._send_web_request(service=service_string) # For the machine tags. 
service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) #", "= True) -> None: \"\"\"The instantiation, connecting to the web API using the", "# What happened is unknown. raise error.UndiscoveredError(\"Why the web request failed is unknown.\")", "else: raise error.WebRequestError( \"The server returned an error status message: \\n {message}\".format( message=error_message", ") # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) # Load", "is None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The job ID has already", "job_id = property(__get_job_id, __set_job_id, __del_job_id, __doc_job_id) def _generate_service_url(self, service: str) -> str: \"\"\"Generate", "this login session. \"\"\" # The key. args = {\"apikey\": apikey} result =", "the image. \"\"\" # When uploading a new file, the submission and job", "understood by others and be specialized for OpihiExarata. Attributes ---------- _apikey : string", "unknown.\") else: job_id_list = submission_results.get(\"jobs\", []) # If there are no jobs, then", "extract its information. Returns ------- correlation_table : Table The table which details the", "None, int), (\"image_height\", None, int), (\"x\", None, list), (\"y\", None, list), (\"album\", None,", "not None else self.submission_id ) results = self.get_submission_results(submission_id=submission_id) status = results.get(\"status\") return status", "location of reference stars and their pixel locations. It is obtained from the", "derived from saving it in a temporary # directory. corr_filename = library.path.merge_pathname( filename=fits_table_filename,", "capitalization. file_type = str(file_type).lower() valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type", "dict The result of the submission. 
\"\"\" submission_id = ( submission_id if submission_id", "submission_id if submission_id is not None else self.submission_id ) service_string = \"submissions/{sub_id}\".format(sub_id=submission_id) result", "__set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID, it should only be done", "-> hint.WCS: \"\"\"This obtains the wcs header file and then computes World Coordinate", "(\"scale_units\", None, str), (\"scale_type\", None, str), (\"scale_lower\", None, float), (\"scale_upper\", None, float), (\"scale_est\",", "- Machine Tags : Ditto for tags, but only via machine inputs. -", "self.__submission_id = sub_id else: raise error.ReadOnlyError( \"The submission ID has already been set", "method is made. def _construct_file_download_url(ftype: str, id: str) -> str: \"\"\"Construct the file", "web source than nova.astrometry.net. Defaults to the nova.astrometry.net api service. apikey : string", "# Otherwise, the form should be standard encoded: x-www-form-encoded headers = {} data", "that the type provided is a valid type which we can pull #", "except urllib.error.HTTPError: raise error.WebRequestError( \"The web request output cannot be properly processed. This", "(\"allow_modifications\", \"d\", str), # For visibility by the general public. (\"publicly_visible\", \"y\", str),", "def _generate_upload_args(self, **kwargs) -> dict: \"\"\"Generate the arguments for sending a request. This", "a file be send, then it must be in the # correct format.", "str: \"\"\"Get the status of a job specified by its ID. Parameters ----------", "ID. job_id = job_id if job_id is not None else self.job_id # Ensure", "args=args) session = result.get(\"session\", False) # Check if the session works and that", "cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS = [ # These parameters are for licensing", "licensing and distribution terms. 
(\"allow_commercial_use\", \"d\", str), (\"allow_modifications\", \"d\", str), # For visibility", "has already been set by obtaining it from the API\" \" service.\" )", "name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post = \"\\n\" + \"--\" +", "error.WebRequestError( \"The provided API key did not provide a valid session.\" ) else:", "standard encoded: x-www-form-encoded headers = {} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data)", "+ \"\\r\\n\" ) data_post = \"\\n\" + \"--\" + boundary + \"--\\n\" data", "# Call the API to get the job ID. try: submission_results = self.get_submission_results(", "the job is still in queue. return None # Check that the service", "# For the machine tags. service_string = \"jobs/{id}/machine_tags\".format(id=job_id) results[\"machine_tags\"] = self._send_web_request(service=service_string) # For", "the fits corr file that is downloaded into a temporary directory. Parameters ----------", "__get_job_id(self) -> str: \"\"\"Extract the job ID from the image upload results. It", "returned an error status message: \\n {message}\".format( message=error_message ) ) else: return result", "None try: file = open(pathname, \"rb\") filename = library.path.get_filename_with_extension(pathname=pathname) file_args = {\"filename\": filename,", "list), (\"album\", None, str), ] def __init__(self, url=None, apikey: str = None, silent:", "job ID has already been set by obtaining it from the API service.\"", "scaling parameters, if provided, when known, helps the # processing a little. (\"scale_units\",", "of the default Python client seen at https://github.com/dstndstn/astrometry.net/blob/master/net/client/client.py. The point of this class", "None The filename that the downloaded wcs file will be downloaded as. The", "of this API connection to astrometry.net \"\"\" # The default arguments for uploading", "correlation file to read into a data table. 
upload_filename = library.path.get_filename_without_extension( pathname=self.original_upload_filename )", "log in a derive a session key. self.session = None session_key = self.__login(apikey=apikey)", "Check if the session works and that the API key given is valid.", "-> dict: \"\"\"A wrapper function for sending a webrequest to the astrometry.net API", "helpful for debugging or similar processes. Returns ------- None \"\"\" # Defining the", "\"jobs/{id}/info\".format(id=job_id) results[\"info\"] = self._send_web_request(service=service_string) # All done. return results def get_job_status(self, job_id: str", "else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def __set_job_id(self, job_id)", "be done once when the image is obtained.\"\"\" if self.__job_id is None: self.__job_id", "None else: self.__job_id = job_id_list[-1] return self.__job_id raise error.LogicFlowError return None def __set_job_id(self,", "job ID for the uploaded image and saves it. Parameters ---------- pathname :", "been set by obtaining it from the API\" \" service.\" ) return None", "\"submissions/{sub_id}\".format(sub_id=submission_id) result = self._send_web_request(service=service_string) return result def get_submission_status(self, submission_id: str = None) ->", "is returned instead. \"\"\" job_id = job_id if job_id is not None else", "images to the API. This also determines the submission ID and the job", "- `corr`: A table of the correspondences between reference stars location in the", "it should only be done once when the image is obtained.\"\"\" if self.__submission_id", "because of their read-only nature. del self.submission_id, self.job_id # Save the file information.", "what the error is. 
if error_message == \"bad apikey\": raise error.WebRequestError( \"The API", "A new fits file, containing the original image, annotations, and WCS header information.", "self.__job_id is None: self.__job_id = job_id else: raise error.ReadOnlyError( \"The job ID has", "the job ID already has been obtained, then there is no reason to", "or similar processes. Returns ------- None \"\"\" # Defining the URL. self.ASTROMETRY_BASE_API_URL =", "determined by the file upload is used. Returns ------- None \"\"\" # Get", "apikey\": raise error.WebRequestError( \"The API key provided is not a valid key.\" )", "results = {} # For the status. results[\"status\"] = status # For the", "in the image field. - Annotations : Known objects in the field, with", "pathname=self.original_upload_filename ) fits_table_filename = ( temp_filename if temp_filename is not None else upload_filename", "the results of a submission specified by its ID. Parameters ---------- submission_id :", "if desired. if delete_after: os.remove(corr_pathname) return wcs def upload_file(self, pathname: str, **kwargs) ->", "'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" + \"\\r\\n\" ) data_post =", "return status # Should not get here. raise error.LogicFlowError return None def get_submission_results(self,", "determined by the file upload is used. temp_filename : string, default = None", "def get_submission_results(self, submission_id: str = None) -> dict: \"\"\"Get the results of a", "function for sending a webrequest to the astrometry.net API service. Returns the results", "valid_api_file_types = (\"wcs\", \"new_fits\", \"rdls\", \"axy\", \"corr\") if file_type not in valid_api_file_types: raise", "type) form. # Detailed is also their useage cases per # http://astrometry.net/doc/net/api.html#submitting-a-url _DEFAULT_URL_ARGUMENTS", "request if it did not fail. \"\"\" # Obtain the session key derived", "is going on. 
if self.submission_id is None: raise error.WebRequestError( \"There cannot be a", "session ID of this API connection to astrometry.net \"\"\" # The default arguments", "upload is sent to the API, the submission ID is\" \" saved here.\"", ": The status of the job. - Calibration : Calibration of the image", "service_string = \"jobs/{id}/objects_in_field\".format(id=job_id) results[\"objects_in_field\"] = self._send_web_request(service=service_string) # For the annotations. service_string = \"jobs/{id}/annotations\".format(id=job_id)", "temporary file after loading it if desired. if delete_after: os.remove(corr_pathname) return correlation_table def", "by others and be specialized for OpihiExarata. Attributes ---------- _apikey : string The", "None # Check that the service was successful. status = job_result.get(\"status\", False) if", "dict = None ) -> dict: \"\"\"A wrapper function for sending a webrequest", "Parameters ---------- pathname : str The pathname of the file to open. The", "type of file to be downloaded from astrometry.net. It should one of the", "# Before downloading the file, check that the file actually exists. if job_id", "\" are not ready.\" ) # Download the file. library.http.download_file_from_url( url=file_download_url, filename=filename, overwrite=True", "= _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the file, check that the file actually", "\" It is likely that the job is still processing and thus the", "\"\\n\" + \"Content-Type: application/octet-stream\\r\\n\" + \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"]", "URL. 
self.ASTROMETRY_BASE_API_URL = ( str(url) if url is not None else _DEFAULT_BASE_URL )", "bool = True ) -> hint.WCS: \"\"\"This obtains the wcs header file and", "None) return self.__submission_id def __set_submission_id(self, sub_id) -> None: \"\"\"Assign the submission ID, it", "is not None: args.update({keydex: defaultdex}) return args def _send_web_request( self, service: str, args:", "wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the temporary file after loading", ") ) # Construct the URL for the request. It is a little", "+ \"MIME-Version: 1.0\\r\\n\" + 'Content-disposition: form-data; name=\"file\"; filename=\"{name}\"'.format( name=file_args[\"filename\"] ) + \"\\r\\n\" +", "= \"jobs/{id}\".format(id=job_id) status = None try: job_result = self._send_web_request(service=service_string) except error.WebRequestError: # This", "library.temporary.make_temporary_directory_path( filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id )", "wcs file will be downloaded as. The path is going to still be", ": Astropy WCS The world coordinate solution class for the image provided. \"\"\"", "that the data format must be a JSON based datatype. json_data = library.json.dictionary_to_json(dictionary=args)", "submission. \"\"\" submission_id = ( submission_id if submission_id is not None else self.submission_id", "method to log into the API system. Parameters ---------- apikey : string The", "None, delete_after: bool = True ) -> hint.WCS: \"\"\"This obtains the wcs header", "results : dictionary The results of the web request if it did not", "= {} data = {\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") #", "then it is likely still in queue. 
if len(job_id_list) == 0: self.__job_id =", "It is a little different from the # normal API scheme so a", "_type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before downloading the", "data = data_pre.encode() + file_args[\"data\"] + data_post.encode() else: # Otherwise, the form should", "= \"http://nova.astrometry.net/{_type}_file/{_id}\".format( _type=ftype, _id=id ) return url file_download_url = _construct_file_download_url(ftype=file_type, id=job_id) # Before", "self._send_web_request(service=service_string) # For the tags. service_string = \"jobs/{id}/tags\".format(id=job_id) results[\"tags\"] = self._send_web_request(service=service_string) # For", "still in queue. status = None else: # Check the job status. status", "is likely\" \" from a bad web request.\" ) # The logic should", "reset because of their read-only nature. del self.submission_id, self.job_id # Save the file", "for the web API service. Returns ------- session_key : string The session key", "file upload is used. Returns ------- result : dict The result of the", "= {\"request-json\": json_data} data = urllib.parse.urlencode(data) data = data.encode(\"utf-8\") # Finally send the", "cannot be properly processed. This is likely\" \" from a bad web request.\"", "returned.) - Status : The status of the job. - Calibration : Calibration", "ID determined by the file upload is used. Returns ------- results : dict", "which details the correlation between the coordinates of the stars and their pixel", "file_args: dict = None ) -> dict: \"\"\"A wrapper function for sending a", "URL which to send this request to, constructed from the service # desired.", "library.json.dictionary_to_json(dictionary=args) # The URL which to send this request to, constructed from the", "the stars and their pixel locations. \"\"\" job_id = job_id if job_id is", "check that the file actually exists. 
if job_id is None: raise error.WebRequestError(\"There is", "header from the file. wcs_header = library.fits.read_fits_header(filename=corr_pathname) wcs = ap_wcs.WCS(wcs_header) # Delete the", "dict The results of the astrometry.net job. They are, in general: (If the", "an x,y list of # source star positions. (\"image_width\", None, int), (\"image_height\", None,", "filename=corr_filename ) # Save the correlation file. self.download_result_file( filename=corr_pathname, file_type=\"wcs\", job_id=job_id ) #", "None: raise error.WebRequestError( \"There cannot be a job id without there being a", "`axy`: A table in of the location of stars detected in the provided", "for the request. It is a little different from the # normal API", "the job is still processing and thus the data files\" \" are not", "dict = {}, file_args: dict = None ) -> dict: \"\"\"A wrapper function", "type to be downloaded is not a valid type which can\" \" be" ]
[ "mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds)", "i in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg)", "around two python classes. A prior and posterior class. There should be a", "import QTable, Table import arviz as az from astropy.coordinates import SkyCoord from astropy", "idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in range(0,len(Table)): #limit to", "have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'],", "pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3", "prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if", "hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)", "1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu'])", "In[29]: photoz # In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try:", "prior_list = construct_prior(t) if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind", "and assign it to the three XID+ prior classes. 
# In[41]: #pixsize array", "from astropy.table import QTable, Table import arviz as az from astropy.coordinates import SkyCoord", "def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard cut (i.e. not star", "the closest and check if its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id']", "formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"],", "#pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert", "on background (assumes Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])", "In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import", "with a MOC. 
It also requires an input prior catalogue and point spread", "for i in z[:, src]]) + ' \\n') elif 'fracAGN =' in line:", "= ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \\n')", "2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ #", "in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list", "# # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and", "WCS information # In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to", "' \\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs", "wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf", "= construct_prior(t) if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind =", "set with a MOC. It also requires an input prior catalogue and point", "We need uncertianty so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]:", "= fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500", "SED prior model extension to XID+. Here we focus on sources in [Rowan-Robinson", "cutting down maps and catalogues so they cover the same area. 
It can", "#photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz", "to the three XID+ prior classes. # In[41]: #pixsize array (size of pixels", "src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import vstack,", "uncertianty so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for", "15 (the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue", "prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and", "in prior list if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return", "(in arcseconds) hdulist.close() # XID+ uses Multi Order Coverage (MOC) maps for cutting", "maps for cutting down maps and catalogues so they cover the same area.", "resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to fit (and make any", "prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'],", "HEALPix order of 15 (the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) #", "# In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) 
prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz #", "should be a prior class for each map being fitted. It is initiated", "scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+", "It is initiated with a map, noise map, primary header and map header", "if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]:", "will use a HEALPix order of 15 (the resolution: higher order means higher", "in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel", "pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)", "using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale", "it using PyVO # In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") #", "#add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as u", "(np.tril(dist_matrix)>0) 
xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask ==", "# In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'):", "+ ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \\n') elif 'fracAGN", "sources ind = idxc == src #if there are matches if ind.sum() >0:", "redshift. We need uncertianty so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) #", "nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import vstack, hstack return hstack(wavelengths), hstack(fluxes)", "(and make any cuts). Here we use HELP's VO database and directly call", "350 scale in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale", "#Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output", "1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates", "In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard cut (i.e.", "a MOC. It also requires an input prior catalogue and point spread function.", "the SED prior model extension to XID+. 
Here we focus on sources in", "sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as u #create table of", "## Get Redshift and Uncertianty # # <NAME> defines a median and a", "call it using PyVO # In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\")", "need uncertianty so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27'))", "import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False)", "units as u for i in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j:", "range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0)", "Get Redshift and Uncertianty # # <NAME> defines a median and a hierarchical", "vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS',", "can be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3", "terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x", "open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift =' in line: fout.write(' redshift", "yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] #", "# set more appropriate units for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity']", "25.15, 36.3 '' 
for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. Lets use", "check if its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not", "images, noise maps, header info and WCS information # In[6]: #-----250------------- hdulist =", "= np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z", "fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more", "= d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :]", "(i.e. not star and is detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3)", "= wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+ uses Multi Order", "source to new sources table, create prior list if new_Table is not None:", "prior class for each map being fitted. 
It is initiated with a map,", "# Before fitting, the prior classes need to take the PSF and calculate", "if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep", "from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0", "coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]:", ">0: #choose the closest and check if its in the prior list all", "via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8')", "* d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size", "skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the", "In[3]: # In[4]: # Set image and catalogue filenames # In[5]: #Folder containing", "initiated with a map, noise map, primary header and map header and can", "a map, noise map, primary header and map header and can be set", "print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density =", "from astropy.coordinates import SkyCoord from astropy import units as u for i in", "u for i in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1 =", "the three 
XID+ prior classes. # In[41]: #pixsize array (size of pixels in", "a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'],", "prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel", "additional cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered", "pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y", "L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths", "the same area. It can also take in MOCs as selection functions to", "python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we", "catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map", "hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy", "prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table", "skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec'])", "fin: if 'redshift =' in line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i)", "carry out additional cuts. 
Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a", "for each map being fitted. It is initiated with a map, noise map,", "#search around all of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every", "respectively. Lets use the astropy module to construct a Gaussian PSF and assign", "i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list))", "pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get", "is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep = 18", "XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed", "#use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355) from", "than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)", "scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of", "# XID+ is built around two python classes. 
A prior and posterior class.", "modules # In[3]: # In[4]: # Set image and catalogue filenames # In[5]:", "src]]) + ' \\n') elif 'fracAGN =' in line: fout.write(' fracAGN = '", "are matches if ind.sum() >0: #choose the closest and check if its in", "[] fluxes = [] for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i *", "ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import", "in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate", "to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as u #create", "response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires", "# In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500", "not in prior list if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]])", "# In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import", "alt_model=False): # Import required modules # In[3]: # In[4]: # Set image and", "import units as u for i in range(0,len(prior_list)): 
for j in range(0,len(prior_list)): if", "and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior", "table of candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source", "(separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty # # <NAME>", "'dec')) #add candidate source to new sources table, create prior list if new_Table", "= SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new sources idxc, idxcatalog, d2d,", "u #create table of candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add", "import arviz as az from astropy.coordinates import SkyCoord from astropy import units as", "and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu", "from astropy import units as u import os import pymoc from astropy import", "built around two python classes. A prior and posterior class. There should be", "want to fit (and make any cuts). Here we use HELP's VO database", "prior_list # In[64]: import astropy.units as u #create table of candidate source t", "for cutting down maps and catalogues so they cover the same area. 
It", "range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) #", "wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header", "prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale", "x and y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+", "an input prior catalogue and point spread function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu,", "standard cut (i.e. not star and is detected in at least 3 opt/nir", "SkyCoord #first use standard cut (i.e. not star and is detected in at", "18.15, 25.15, 36.3 '' for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. Lets", "250, 350 and 500 $\\mathrm{\\mu m}$ respectively. Lets use the astropy module to", "and 500 $\\mathrm{\\mu m}$ respectively. 
Lets use the astropy module to construct a", "files and prepare them for fitting with XID+CIGALE, the SED prior model extension", "prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if", "# In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0:", "assign it to the three XID+ prior classes. # In[41]: #pixsize array (size", "dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]]", "prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the PSF can be assumed to", "XID+CIGALE to Extreme Starbursts # In this notebook, we read in the data", "#point response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf", "new sources table, create prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else:", "#get 500 scale in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as", "=' in line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in", "', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \\n') else: fout.write(line) fin.close()", "photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from 
astropy.coordinates", "nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i *", "getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size", "order of 15 (the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load", "= wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header", "hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy", "scale in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in", "#pixel size (in arcseconds) hdulist.close() # XID+ uses Multi Order Coverage (MOC) maps", "))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard", "any cuts). Here we use HELP's VO database and directly call it using", "interested in. We will use a HEALPix order of 15 (the resolution: higher", "scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of", "to take the PSF and calculate how muich each source contributes to each", "= QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to new sources table, create", "XID+ prior classes. # In[41]: #pixsize array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500])", "36.3 '' for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. 
Lets use the", "higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to fit (and make", "im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0])", "prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] ==", "prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord", "prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]:", "category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size", "prior model extension to XID+. 
Here we focus on sources in [Rowan-Robinson et", "(the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you", "construct a Gaussian PSF and assign it to the three XID+ prior classes.", "is initiated with a map, noise map, primary header and map header and", "astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] /", "= 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation", "prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map", "prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set", "for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list", "Extreme Starbursts # In this notebook, we read in the data files and", "separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation < sep)", "hstack import numpy as np import xidplus # # Applying XID+CIGALE to Extreme", "Lets calculate the pointing matrix for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix()", "specific position we are interested in. We will use a HEALPix order of", "range(0,len(Table)): #limit to matches around interested sources ind = idxc == src #if", "with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500", "input prior catalogue and point spread function. 
# # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise", "in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j]", "10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]:", "#prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy", "to create a MOC, centered on a specific position we are interested in.", "sources for src in range(0,len(Table)): #limit to matches around interested sources ind =", "astropy.table import QTable, Table import arviz as az from astropy.coordinates import SkyCoord from", "= wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header", "each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def", "catalogue and point spread function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map,", "cover the same area. It can also take in MOCs as selection functions", "map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2]", "pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale", "MOC. It also requires an input prior catalogue and point spread function. 
#", "= SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind]", "Load in images, noise maps, header info and WCS information # In[6]: #-----250-------------", "hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert", "= SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes", "In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT", "moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to fit (and make any cuts).", "in images, noise maps, header info and WCS information # In[6]: #-----250------------- hdulist", "assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for", "of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map", "bayes combination redshift. 
We need uncertianty so lets match via `help_id` # In[26]:", "in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms", "to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in", "rather than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam-----------------------", "[pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we are interested", "redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + '", "uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes", "pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+',", "if its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in", "c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) #", "TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") #", "None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep = 18 separation =", "PSF. 
For SPIRE, the PSF can be assumed to be Gaussian with a", "prior classes need to take the PSF and calculate how muich each source", "= d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :]", "#convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size", "'fracAGN =' in line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i", "(i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp +", "prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id']", "', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \\n') elif 'fracAGN ='", "SPIRE, the PSF can be assumed to be Gaussian with a FWHM of", "hdulist.close() # XID+ uses Multi Order Coverage (MOC) maps for cutting down maps", "astropy.table import vstack, hstack import numpy as np import xidplus # # Applying", "classes. # In[41]: #pixsize array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response", "process provides what we call a pointing matrix. Lets calculate the pointing matrix", "in MOCs as selection functions to carry out additional cuts. 
Lets use the", "claimed to have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def", "field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules # In[3]: #", "# In[3]: # In[4]: # Set image and catalogue filenames # In[5]: #Folder", "opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from", "astropy.coordinates import SkyCoord from astropy import units as u import os import pymoc", "Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they", "there are matches if ind.sum() >0: #choose the closest and check if its", "header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)", "fluxes = [] for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp", "= Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src])", "in fin: if 'redshift =' in line: fout.write(' redshift = ' + ',", "to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist", "arviz as az from astropy.coordinates import SkyCoord from astropy import units as u", "np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and 
Uncertianty #", "+ ' \\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait()", "uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so", "w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits)", "Load in catalogue you want to fit (and make any cuts). Here we", "For SPIRE, the PSF can be assumed to be Gaussian with a FWHM", "Redshift and Uncertianty # # <NAME> defines a median and a hierarchical bayes", "Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to", "interested sources ind = idxc == src #if there are matches if ind.sum()", "y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources", "pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000", "im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel", "model extension to XID+. 
Here we focus on sources in [Rowan-Robinson et al.", "dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] =", "map, noise map, primary header and map header and can be set with", "Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and", "return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior", "os import pymoc from astropy import wcs from astropy.table import vstack, hstack import", "ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy import units", "\"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first", "spread function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs", "to new sources table, create prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table]))", "In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and primary header", "# In[41]: #pixsize array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function", "i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False", "return prior_list # In[64]: import astropy.units as u #create table of candidate source", 
"bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input", "pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create", "folder output_folder='./' # Load in images, noise maps, header info and WCS information", "down maps and catalogues so they cover the same area. It can also", "vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM", "(assumes Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500--------", "to each pixel. This process provides what we call a pointing matrix. Lets", "create a MOC, centered on a specific position we are interested in. We", "import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0]", "str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before", "each source contributes to each pixel. 
This process provides what we call a", "c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new sources idxc, idxcatalog,", "= (separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift", "function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev", ":] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size,", "# In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True", "# In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and primary", "str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need", "fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) +", "output_folder='./' # Load in images, noise maps, header info and WCS information #", "be a prior class for each map being fitted. It is initiated with", "three XID+ prior classes. # In[41]: #pixsize array (size of pixels in arcseconds)", "Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a", "header and map header and can be set with a MOC. 
It also", "# In[4]: # Set image and catalogue filenames # In[5]: #Folder containing maps", "import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for i", "on a specific position we are interested in. We will use a HEALPix", "= {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need to take the PSF", "of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands prfsize=np.array([18.15,25.15,36.3])", "prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if in_prior.sum()", "from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass']", "catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all", "dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord", "i in agn[:, src]]) + ' \\n') else: fout.write(line) fin.close() fout.close() p =", "* d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line", "ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) 
#prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None", "path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules # In[3]: # In[4]:", "centered on a specific position we are interested in. We will use a", "names=('ra', 'dec')) #add candidate source to new sources table, create prior list if", "d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in range(0,len(Table)): #limit to matches around", "#add candidate source to new sources table, create prior list if new_Table is", "In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy import units as u for", "a prior class for each map being fitted. It is initiated with a", "grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using '", "+ str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) #", "pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use", "and calculate how muich each source contributes to each pixel. This process provides", "SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = []", "the PSF and calculate how muich each source contributes to each pixel. 
This", "(MOC) maps for cutting down maps and catalogues so they cover the same", "moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the PSF can be assumed", "map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian", "not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep = 18 separation", "# In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard cut", "fitting, the prior classes need to take the PSF and calculate how muich", "WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from", "sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in range(0,len(Table)):", "im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0])", "not star and is detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) &", "from astropy.coordinates import SkyCoord from astropy import units as u import os import", "is detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord", "# ## Get Redshift and Uncertianty # # <NAME> defines a median and", "'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust", "# In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from 
astropy.coordinates import SkyCoord from astropy import units as u", "a hierarchical bayes combination redshift. We need uncertianty so lets match via `help_id`", "pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need to", "hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert", "as u for i in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1", "fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set", "# # <NAME> defines a median and a hierarchical bayes combination redshift. We", "fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift", "# Set PSF. For SPIRE, the PSF can be assumed to be Gaussian", "(masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c =", "and y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+'", "construct_prior(t) if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation", "sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source", "agn[:, src]]) + ' \\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'],", "its not in prior list if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources", "selection functions to carry out additional cuts. 
Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/)", "== src #if there are matches if ind.sum() >0: #choose the closest and", "astropy.coordinates import SkyCoord #first use standard cut (i.e. not star and is detected", "masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around", "noise maps, header info and WCS information # In[6]: #-----250------------- hdulist = fits.open(pswfits)", "and map header and can be set with a MOC. It also requires", "prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355)", "create prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t)", "prior list if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list", "500 map #output folder output_folder='./' # Load in images, noise maps, header info", "star and is detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)]", "the data files and prepare them for fitting with XID+CIGALE, the SED prior", "size (in arcseconds) hdulist.close() # XID+ uses Multi Order Coverage (MOC) maps for", "Uncertianty # # <NAME> defines a median and a hierarchical bayes combination redshift.", "fitted. 
It is initiated with a map, noise map, primary header and map", "from astropy.io import ascii, fits from astropy.table import QTable, Table import arviz as", "use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific", "the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src", "candidate source to new sources table, create prior list if new_Table is not", "to create prf (requires stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution import", "500 scale in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d", "d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin", "t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to new sources table,", "defines a median and a hierarchical bayes combination redshift. We need uncertianty so", "fit (and make any cuts). Here we use HELP's VO database and directly", "# # Applying XID+CIGALE to Extreme Starbursts # In this notebook, we read", "closest and check if its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if", "* d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr =", "header and can be set with a MOC. It also requires an input", "#---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. 
For SPIRE, the PSF can", "fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from", "prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ##", "500 $\\mathrm{\\mu m}$ respectively. Lets use the astropy module to construct a Gaussian", "<1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units", "from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search", "'redshift =' in line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i", "wcs from astropy.table import vstack, hstack import numpy as np import xidplus #", "plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load in images, noise maps, header", "class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src,", "range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] =", "im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header)", "dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy import units as u for i", "map being fitted. It is initiated with a map, noise map, primary header", "be set with a MOC. 
It also requires an input prior catalogue and", "from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)", "take the PSF and calculate how muich each source contributes to each pixel.", "d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\")", "d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for", "\"wt\") for line in fin: if 'redshift =' in line: fout.write(' redshift =", "\\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs =", "w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits)", "of candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to", "d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in", "p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust from astropy.constants", "range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0", "/ 1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) *", 
"list if in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list #", "and claimed to have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]:", "and posterior class. There should be a prior class for each map being", "and catalogues so they cover the same area. It can also take in", "if ind.sum() >0: #choose the closest and check if its in the prior", "size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to", "herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None):", "astropy import units as u for i in range(0,len(prior_list)): for j in range(0,len(prior_list)):", "prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+ is built", "star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'],", "for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev rather", "d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size *", "arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to", "In[64]: import astropy.units as u #create table of candidate source t = QTable([c.ra,", "to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = 
wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in", "PSF can be assumed to be Gaussian with a FWHM of 18.15, 25.15,", "scale in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in", "src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size *", "w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+ uses Multi", "# Set image and catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250", "map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load in", "we are interested in. We will use a HEALPix order of 15 (the", "#-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to", "module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position we are", "hierarchical bayes combination redshift. 
We need uncertianty so lets match via `help_id` #", "SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] #", "In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map", "function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info", "contributes to each pixel. This process provides what we call a pointing matrix.", "use HELP's VO database and directly call it using PyVO # In[10]: import", "# In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id #", "be assumed to be Gaussian with a FWHM of 18.15, 25.15, 36.3 ''", "matrix for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return", "src]]) + ' \\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/')", "ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if in_prior.sum() <1: print(in_prior.sum()) #add", "#if there are matches if ind.sum() >0: #choose the closest and check if", "#help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): 
photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column,", "Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for i in", "map, uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background", "a FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\\mathrm{\\mu", "process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules #", "classes. A prior and posterior class. There should be a prior class for", "prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as u #create table of candidate", "# Import required modules # In[3]: # In[4]: # Set image and catalogue", "VO database and directly call it using PyVO # In[10]: import pyvo as", "a HEALPix order of 15 (the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15)", "posterior class. 
There should be a prior class for each map being fitted.", "image and catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE", "and directly call it using PyVO # In[10]: import pyvo as vo service", "requires an input prior catalogue and point spread function. # # In[40]: #---prior250--------", "import units as u import os import pymoc from astropy import wcs from", "print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as", "mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds)", "in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if in_prior.sum() <1: print(in_prior.sum()) #add to", "for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. 
Lets use the astropy module", "# In[29]: photoz # In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i]", "prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list #", "'+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting,", "info and WCS information # In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3", "Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get", "database and directly call it using PyVO # In[10]: import pyvo as vo", "\\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density", "import xidplus # # Applying XID+CIGALE to Extreme Starbursts # In this notebook,", "import wcs from astropy.table import vstack, hstack import numpy as np import xidplus", "Set image and catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map", "same area. It can also take in MOCs as selection functions to carry", "out additional cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC,", "and point spread function. 
# # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty", "We will use a HEALPix order of 15 (the resolution: higher order means", "be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250, 350", "new sources for src in range(0,len(Table)): #limit to matches around interested sources ind", "we call a pointing matrix. Lets calculate the pointing matrix for each prior", "photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from", "idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in range(0,len(Table)): #limit", "in the data files and prepare them for fitting with XID+CIGALE, the SED", "of 15 (the resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in", "in_prior.sum() <1: print(in_prior.sum()) #add to appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import", "to matches around interested sources ind = idxc == src #if there are", "from astropy.coordinates import SkyCoord #first use standard cut (i.e. 
not star and is", "detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from", "ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]:", "of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map", "new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in", "wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header", "pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load in images,", "remove_ind = (separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ## Get", "subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn", "and catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350", "sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i,", "bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n')", "#pixsize array (size of pixels in arcseconds) 
pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the", "in line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:,", "= [] fluxes = [] for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i", "if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from", "the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on a specific position", "to be Gaussian with a FWHM of 18.15, 25.15, 36.3 '' for 250,", "#output folder output_folder='./' # Load in images, noise maps, header info and WCS", "d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr", "for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using", "d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout", "density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need to take the", "of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel", "scale in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid,", "/ L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes = []", "header info and WCS information # In[6]: 
#-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header", "are interested in. We will use a HEALPix order of 15 (the resolution:", "350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load in images, noise", "information # In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy", "#if its not in prior list if in_prior.sum() <1: print(in_prior.sum()) #add to appended", "map #output folder output_folder='./' # Load in images, noise maps, header info and", "and a hierarchical bayes combination redshift. We need uncertianty so lets match via", "with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])", "QTable, Table import arviz as az from astropy.coordinates import SkyCoord from astropy import", "cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC, centered on", "= ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \\n')", "vstack, hstack import numpy as np import xidplus # # Applying XID+CIGALE to", "' \\n') elif 'fracAGN =' in line: fout.write(' fracAGN = ' + ',", "as selection functions to carry out additional cuts. 
Lets use the python module", "prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu,", "in z[:, src]]) + ' \\n') elif 'fracAGN =' in line: fout.write(' fracAGN", "3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord", "map, primary header and map header and can be set with a MOC.", "radius=6.0, alt_model=False): # Import required modules # In[3]: # In[4]: # Set image", "in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]:", "position we are interested in. We will use a HEALPix order of 15", "info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with", "#---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id'])", "QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to new sources table, create prior", "> np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty", "moc=moc)#Initialise with map, uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior", "Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) 
prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu,", "pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get", "M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths =", "pointing matrix for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]:", "import numpy as np import xidplus # # Applying XID+CIGALE to Extreme Starbursts", "In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert", "means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to fit (and", "as 2d grid, and x and y bins for grid (in pixel scale)", "units for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value", "subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for", "In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+ is built around two", "background (assumes Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5)", "str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the", "pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 
scale in terms of pixel scale", "in agn[:, src]]) + ' \\n') else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale',", "+ ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \\n') else: fout.write(line)", "for src in range(0,len(Table)): #limit to matches around interested sources ind = idxc", "mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500------------- hdulist =", "wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+ uses Multi Order Coverage", "dec=Table['dec']) #search around all of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for", "line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]])", "#create table of candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate", "PyVO # In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset", "astropy.coordinates import SkyCoord from astropy import units as u for i in range(0,len(prior_list)):", "#---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF.", "MOC, centered on a specific position we are interested in. We will use", "sources in [Rowan-Robinson et al. 
2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation", "2d grid, and x and y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350)", "of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel", "XID+ is built around two python classes. A prior and posterior class. There", "directly call it using PyVO # In[10]: import pyvo as vo service =", "beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms", "idxc == src #if there are matches if ind.sum() >0: #choose the closest", "fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 =", "a Gaussian PSF and assign it to the three XID+ prior classes. #", "In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]:", "for fitting with XID+CIGALE, the SED prior model extension to XID+. Here we", "prior and posterior class. 
There should be a prior class for each map", "c.dec], names=('ra', 'dec')) #add candidate source to new sources table, create prior list", "** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table", "im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel", "hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy", "the astropy module to construct a Gaussian PSF and assign it to the", "module to construct a Gaussian PSF and assign it to the three XID+", "three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev rather than fwhm", "' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg))", ":] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout =", "' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \\n') else:", "#pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert", "# In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]:", "terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of", "+ (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp", "im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel", 
"* nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import vstack, hstack return hstack(wavelengths),", "being fitted. It is initiated with a map, noise map, primary header and", "xidplus # # Applying XID+CIGALE to Extreme Starbursts # In this notebook, we", "fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') #", "the pointing matrix for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() #", "point spread function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertianty map,", "It also requires an input prior catalogue and point spread function. # #", "(in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy", "nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close()", "pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf", "d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size *", "in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except", "of map 
prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for", "map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load in images, noise maps,", "arcseconds) hdulist.close() # XID+ uses Multi Order Coverage (MOC) maps for cutting down", "There should be a prior class for each map being fitted. It is", "250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' # Load", "cut (i.e. not star and is detected in at least 3 opt/nir bands)", "units as u import os import pymoc from astropy import wcs from astropy.table", "the three bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev rather than", "# In[64]: import astropy.units as u #create table of candidate source t =", "\\n') elif 'fracAGN =' in line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i)", "18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation <", "all of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources", "#-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to", "fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) +", "elif 'fracAGN =' in line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for", "map header and can be set with a MOC. 
It also requires an", "FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table def", "as u #create table of candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec'))", "import os import pymoc from astropy import wcs from astropy.table import vstack, hstack", "it to the three XID+ prior classes. # In[41]: #pixsize array (size of", "and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) #", "Before fitting, the prior classes need to take the PSF and calculate how", "# In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+ is built around", "HELP's VO database and directly call it using PyVO # In[10]: import pyvo", "$\\mathrm{\\mu m}$ respectively. Lets use the astropy module to construct a Gaussian PSF", "d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size *", "functions to carry out additional cuts. Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to", "stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian", "two python classes. A prior and posterior class. 
There should be a prior", "from astropy.table import vstack, hstack import numpy as np import xidplus # #", "mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5)", "def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules", "map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid", "prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the PSF can be", "* FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\") # In[12]: masterlist=resultset.table", "MOCs as selection functions to carry out additional cuts. 
Lets use the python", "u import os import pymoc from astropy import wcs from astropy.table import vstack,", "XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues", "size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size,", "agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample,", "use the astropy module to construct a Gaussian PSF and assign it to", "= fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250", "def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample =", "for line in fin: if 'redshift =' in line: fout.write(' redshift = '", "p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate", "fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift =' in line:", "resolution: higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want", "in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc'))", "250 scale in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale", "for j in range(0,len(prior_list)): 
if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value", "ascii, fits from astropy.table import QTable, Table import arviz as az from astropy.coordinates", "maps and catalogues so they cover the same area. It can also take", "prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty # # <NAME> defines a median", "# In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table", "= c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind)", "from astropy import units as u for i in range(0,len(prior_list)): for j in", "classes need to take the PSF and calculate how muich each source contributes", "so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i", "sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set", "In[4]: # Set image and catalogue filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE", "redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], 
radius=6.0, alt_model=False): # Import required modules # In[3]: # In[4]: #", "every new sources for src in range(0,len(Table)): #limit to matches around interested sources", "#convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350-------------", "j in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value #", "(requires stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using", "pixel. This process provides what we call a pointing matrix. Lets calculate the", "import vstack, hstack import numpy as np import xidplus # # Applying XID+CIGALE", "i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] /", "scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins", "{}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need to take the PSF and", "to have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None,", "0.028 ))\") # In[12]: masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use", "table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new sources idxc,", "to fit (and make any cuts). 
Here we use HELP's VO database and", "np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] z =", "sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import", "candidate source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to new", "class for each map being fitted. It is initiated with a map, noise", "for i in agn[:, src]]) + ' \\n') else: fout.write(line) fin.close() fout.close() p", "the PSF can be assumed to be Gaussian with a FWHM of 18.15,", "primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and", "class. There should be a prior class for each map being fitted. It", "fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 =", "also take in MOCs as selection functions to carry out additional cuts. 
Lets", "service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main", "In[41]: #pixsize array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for", "prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1]", "(separation > np.min(separation)) & (separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and", "prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior':", "else: prior_list = construct_prior(t) if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec", "False] # In[39]: prior_list # XID+ is built around two python classes. A", "`help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id", "az from astropy.coordinates import SkyCoord from astropy import units as u import os", "[Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of", "Gaussian2DKernel to create prf (requires stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution", "line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]])", "fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in agn[:, src]]) + '", "m}$ respectively. Lets use the astropy module to construct a Gaussian PSF and", "to construct a Gaussian PSF and assign it to the three XID+ prior", "them for fitting with XID+CIGALE, the SED prior model extension to XID+. 
Here", "pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3", "if 'redshift =' in line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for", "prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of", "prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For", "a specific position we are interested in. We will use a HEALPix order", "input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new sources", "how muich each source contributes to each pixel. 
This process provides what we", "'+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+'", "= open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift ='", "maps, header info and WCS information # In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header", "im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header)", "all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if in_prior.sum() <1: print(in_prior.sum())", "350 and 500 $\\mathrm{\\mu m}$ respectively. Lets use the astropy module to construct", "in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms", "pymoc from astropy import wcs from astropy.table import vstack, hstack import numpy as", "path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules # In[3]:", "MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for i in range(0,len(prior_list)):", "terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of", "= 
d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\")", "from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new", "FWHM of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\\mathrm{\\mu m}$", "arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3", "provides what we call a pointing matrix. Lets calculate the pointing matrix for", "d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin:", "#convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----500-------------", "also requires an input prior catalogue and point spread function. # # In[40]:", "== False] # In[39]: prior_list # XID+ is built around two python classes.", "for i in range(0,len(prior_list)): for j in range(0,len(prior_list)): if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs')", "a pointing matrix. Lets calculate the pointing matrix for each prior class #", "/ SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import vstack, hstack", "prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import", "prior_list # XID+ is built around two python classes. 
A prior and posterior", "extension to XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783)", "In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required", "to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+", "area. It can also take in MOCs as selection functions to carry out", "of 18.15, 25.15, 36.3 '' for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively.", "in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and", "Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in", "It can also take in MOCs as selection functions to carry out additional", "'' for 250, 350 and 500 $\\mathrm{\\mu m}$ respectively. Lets use the astropy", "prf (requires stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit", "In this notebook, we read in the data files and prepare them for", "/ M_sun.value wavelengths = [] fluxes = [] for i in range(0, nsamp):", "This process provides what we call a pointing matrix. Lets calculate the pointing", "import SkyCoord #first use standard cut (i.e. not star and is detected in", "is built around two python classes. 
A prior and posterior class. There should", "for i in range(0,len(photoz)): photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn", "maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./'", "you want to fit (and make any cuts). Here we use HELP's VO", "as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000 *", "around all of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new", "prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of", "matches if ind.sum() >0: #choose the closest and check if its in the", "##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak') pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250", "= coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] #", "L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes = 
[] for", "Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) /", "# In this notebook, we read in the data files and prepare them", "of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0,", "list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if in_prior.sum() <1:", "PSF and assign it to the three XID+ prior classes. # In[41]: #pixsize", "nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size,", "z[:, src]]) + ' \\n') elif 'fracAGN =' in line: fout.write(' fracAGN =", "[] for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id']))", "and can be set with a MOC. 
It also requires an input prior", "None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy import units as", "pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak')", "prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+ is built around two python", "im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header)", "prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the PSF can be assumed to be", "In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True #", "SkyCoord from astropy import units as u for i in range(0,len(prior_list)): for j", "#for every new sources for src in range(0,len(Table)): #limit to matches around interested", "d.draw.size, d.src.size)[subsample, :] z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size", "wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) / SEDs[i * nsamp + (i)]['sfh.sfr'])", "= service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028", "a MOC, centered on a specific position we are interested in. 
We will", "noise map, primary header and map header and can be set with a", "SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes = [] for i", "#get 350 scale in terms of pixel scale of map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500", "et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $>", "use standard cut (i.e. not star and is detected in at least 3", "and is detected in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make", "nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close()", "ind.sum() >0: #choose the closest and check if its in the prior list", "= open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift =' in line: fout.write('", "muich each source contributes to each pixel. This process provides what we call", "= SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes = [] for i in", "Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust from astropy.constants import L_sun, M_sun", "=' in line: fout.write(' fracAGN = ' + ', '.join(['{:.13f}'.format(i) for i in", "= [] for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp +", "print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes need to take", "in at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist", "we read in the data files and prepare them for fitting with XID+CIGALE,", "in range(0,len(Table)): #limit to matches around interested sources ind = idxc == src", "the prior list all ready 
in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior list if", "prior classes. # In[41]: #pixsize array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point", "source t = QTable([c.ra, c.dec], names=('ra', 'dec')) #add candidate source to new sources", "10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): #", "notebook, we read in the data files and prepare them for fitting with", "as np import xidplus # # Applying XID+CIGALE to Extreme Starbursts # In", "and prepare them for fitting with XID+CIGALE, the SED prior model extension to", "lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in", "# In[39]: prior_list # XID+ is built around two python classes. A prior", "line in fin: if 'redshift =' in line: fout.write(' redshift = ' +", "can also take in MOCs as selection functions to carry out additional cuts.", "new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True: sep =", "and x and y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting", "i in z[:, src]]) + ' \\n') elif 'fracAGN =' in line: fout.write('", "SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes = [] for i in range(0,", "# <NAME> defines a median and a hierarchical bayes combination redshift. 
We need", "& (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c", "import pymoc from astropy import wcs from astropy.table import vstack, hstack import numpy", "[prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else: d=data.prior subsample", "# Applying XID+CIGALE to Extreme Starbursts # In this notebook, we read in", "in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3)", "# In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset =", "data files and prepare them for fitting with XID+CIGALE, the SED prior model", "as u import os import pymoc from astropy import wcs from astropy.table import", "appended sources prior_list=vstack([prior_list,masterlist[idxcatalog][ind][np.argmin(d2d[ind])]]) return prior_list # In[64]: import astropy.units as u #create table", "higher order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to", "= idxc == src #if there are matches if ind.sum() >0: #choose the", "and check if its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its", "of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for", "In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+ is", "PSF and calculate how muich each source contributes to each pixel. 
This process", "pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder output_folder='./' #", "they cover the same area. It can also take in MOCs as selection", "else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size,", "alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation))", "#convert to mJy nim500=hdulist[3].data*1.0E3 #convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size", "#convert to mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #", "d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample,", "else: fout.write(line) fin.close() fout.close() p = subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits')", "create prf (requires stddev rather than fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel", "+ ' \\n') elif 'fracAGN =' in line: fout.write(' fracAGN = ' +", "Coverage (MOC) maps for cutting down maps and catalogues so they cover the", "python classes. A prior and posterior class. There should be a prior class", "In[39]: prior_list # XID+ is built around two python classes. 
A prior and", "map pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires", "around interested sources ind = idxc == src #if there are matches if", "al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star formation rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$", "from astropy import wcs from astropy.table import vstack, hstack import numpy as np", "photoz # In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if", "#choose the closest and check if its in the prior list all ready", "import SkyCoord from astropy import units as u for i in range(0,len(prior_list)): for", "with map, uncertianty map, wcs info and primary header prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on", "arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy nim500=hdulist[3].data*1.0E3", "pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+ uses Multi Order Coverage (MOC)", "# In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list # XID+", "array (size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three", "grid, and x and y bins for grid (in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500)", "in line: fout.write(' redshift = ' + ', '.join(['{:.13f}'.format(i) for i in z[:,", "take in MOCs as selection functions to carry out additional cuts. 
Lets use", "size (in arcseconds) hdulist.close() #-----350------------- hdulist = fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to", "try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list)))", "subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn = d.agn.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :]", "'+ str(prior500.snpix)+' pixels') print('source density = {}'.format(prior250.nsrc/moc.area_sq_deg)) # Before fitting, the prior classes", "cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust from", "mJy w_500 = wcs.WCS(hdulist[1].header) pixsize500=np.abs(3600.0*w_500.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() # XID+ uses", "src in range(0,len(Table)): #limit to matches around interested sources ind = idxc ==", "moc=moc) prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. 
For SPIRE,", "range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError:", "fwhm hence pfwhm/2.355) from astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak')", "prior catalogue and point spread function. # # In[40]: #---prior250-------- prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with", "M_sun.value wavelengths = [] fluxes = [] for i in range(0, nsamp): sed_plot", "fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 =", "in catalogue you want to fit (and make any cuts). Here we use", "XID+CIGALE, the SED prior model extension to XID+. Here we focus on sources", "with XID+CIGALE, the SED prior model extension to XID+. Here we focus on", "fits from astropy.table import QTable, Table import arviz as az from astropy.coordinates import", "# XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and", "# Load in catalogue you want to fit (and make any cuts). 
Here", "= subprocess.Popen(['pcigale', 'run'], cwd='/Volumes/pdh_storage/cigale/') p.wait() SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units", "match via `help_id` # In[26]: photoz=Table.read(redshift_file[0]) # In[27]: #help_id=np.empty((len(photoz)),dtype=np.dtype('U27')) for i in range(0,len(photoz)):", "using PyVO # In[10]: import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]:", "#make skycoord from input table c = SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of", "if alt_model==True: sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation >", "bands prfsize=np.array([18.15,25.15,36.3]) #use Gaussian2DKernel to create prf (requires stddev rather than fwhm hence", "matches around interested sources ind = idxc == src #if there are matches", "rate of $> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0],", "Import required modules # In[3]: # In[4]: # Set image and catalogue filenames", "prior on background (assumes Gaussian pdf with mu and sigma) #---prior350-------- prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)", "prepare them for fitting with XID+CIGALE, the SED prior model extension to XID+.", "SEDs[i * nsamp + (i)]['sfh.sfr']) * sed_plot['Fnu']) from astropy.table import vstack, hstack return", "import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value", "#-----350------------- hdulist = 
fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to", "as az from astropy.coordinates import SkyCoord from astropy import units as u import", "astropy import wcs from astropy.table import vstack, hstack import numpy as np import", "to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in", "prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid (in", "astropy.io import ascii, fits from astropy.table import QTable, Table import arviz as az", "import pyvo as vo service = vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP", "z = d.redshift.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample,", "at least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec'])", "xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False]", "of pixel scale of map prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and", "#first use standard cut (i.e. 
not star and is detected in at least", "required modules # In[3]: # In[4]: # Set image and catalogue filenames #", "order means higher resolution) moc=pymoc.util.catalog.catalog_to_moc(c,100,15) # Load in catalogue you want to fit", "nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 **", "Lets use the astropy module to construct a Gaussian PSF and assign it", "#get 250 scale in terms of pixel scale of map pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350", "table, create prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list =", "nim350=hdulist[3].data*1.0E3 #convert to mJy w_350 = wcs.WCS(hdulist[1].header) pixsize350=np.abs(3600.0*w_350.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close()", "we use HELP's VO database and directly call it using PyVO # In[10]:", "In[30]: ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind]", "for i in range(0, nsamp): sed_plot = Table.read('/Volumes/pdh_storage/cigale/out/{}_best_model.fits'.format(+SEDs[i * nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength']", "* d.draw.size, d.src.size)[subsample, :] sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin =", "prior350.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the", "to carry out additional cuts. 
Lets use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create", "# In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior", "astropy module to construct a Gaussian PSF and assign it to the three", "wavelengths = [] fluxes = [] for i in range(0, nsamp): sed_plot =", "can be set with a MOC. It also requires an input prior catalogue", "pointing matrix. Lets calculate the pointing matrix for each prior class # In[43]:", "'.join(['{:.13f}'.format(i) for i in agn[:, src]]) + ' \\n') else: fout.write(line) fin.close() fout.close()", "containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE 500 map #output folder", "astropy.units as u #create table of candidate source t = QTable([c.ra, c.dec], names=('ra',", "In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import", "each map being fitted. It is initiated with a map, noise map, primary", "with a map, noise map, primary header and map header and can be", "in. 
We will use a HEALPix order of 15 (the resolution: higher order", "prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+'", "mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds)", "SkyCoord from astropy import units as u import os import pymoc from astropy", "import astropy.units as u #create table of candidate source t = QTable([c.ra, c.dec],", "masterlist=resultset.table def construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard cut (i.e. not", "In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\",", "redshift_prior=[0.1,2.0], radius=6.0, alt_model=False): # Import required modules # In[3]: # In[4]: # Set", "so they cover the same area. It can also take in MOCs as", "Applying XID+CIGALE to Extreme Starbursts # In this notebook, we read in the", "astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]: ii=0 for", "Gaussian PSF and assign it to the three XID+ prior classes. # In[41]:", "(size of pixels in arcseconds) pixsize=np.array([pixsize250,pixsize350,pixsize500]) #point response function for the three bands", "fitting with XID+CIGALE, the SED prior model extension to XID+. 
Here we focus", "SkyCoord(ra=Table['ra'], dec=Table['dec']) #search around all of the new sources idxc, idxcatalog, d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec)", "#make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make skycoord from input table c = SkyCoord(ra=Table['ra'],", "make any cuts). Here we use HELP's VO database and directly call it", "this notebook, we read in the data files and prepare them for fitting", "prior350.prior_bkg(-5.0,5) #---prior500-------- prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc) prior500.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior500.prior_bkg(-5.0,5) # Set PSF. For SPIRE, the PSF", "source contributes to each pixel. This process provides what we call a pointing", "SEDs = Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust from astropy.constants import", "ind = idxc == src #if there are matches if ind.sum() >0: #choose", "<NAME> defines a median and a hierarchical bayes combination redshift. We need uncertianty", "except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from astropy import", "SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass'] = SEDs['dust.mass'] / M_sun.value wavelengths = [] fluxes =", "'.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \\n') elif 'fracAGN =' in", "cuts). 
Here we use HELP's VO database and directly call it using PyVO", "sources table, create prior list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list", "coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]:", "i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0)", "astropy.convolution import Gaussian2DKernel ##---------fit using Gaussian beam----------------------- prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101) prf250.normalize(mode='peak') prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101) prf350.normalize(mode='peak') prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101) prf500.normalize(mode='peak')", "Starbursts # In this notebook, we read in the data files and prepare", "' + ', '.join(['{:.13f}'.format(i) for i in z[:, src]]) + ' \\n') elif", "#limit to matches around interested sources ind = idxc == src #if there", "least 3 opt/nir bands) prior_list=masterlist[(masterlist['flag_gaia']!=3) & (masterlist['flag_optnir_det']>=3)] #make skycoord from masterlist catalog=SkyCoord(ra=masterlist['ra'],dec=masterlist['dec']) #make", "read in the data files and prepare them for fitting with XID+CIGALE, the", "catalogues so they cover the same area. 
It can also take in MOCs", "psf as 2d grid, and x and y bins for grid (in pixel", "coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) & (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list)))", "need to take the PSF and calculate how muich each source contributes to", "use a HEALPix order of 15 (the resolution: higher order means higher resolution)", "import ascii, fits from astropy.table import QTable, Table import arviz as az from", "prior250.prior_cat(prior_list['ra'],prior_list['dec'],'photoz',ID=prior_list['help_id']) prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma) #---prior350--------", "numpy as np import xidplus # # Applying XID+CIGALE to Extreme Starbursts #", "set more appropriate units for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] =", ":] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if", "d2d, d3d=catalog.search_around_sky(c,radius*u.arcsec) #for every new sources for src in range(0,len(Table)): #limit to matches", "more appropriate units for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity']", "for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False", "* nsamp + (i)]['id'])) wavelengths.append(sed_plot['wavelength'] / 1E3) fluxes.append(((10.0 ** sfr[i, src]) / 
SEDs[i", "In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]: photoz # In[30]:", "# Load in images, noise maps, header info and WCS information # In[6]:", "its in the prior list all ready in_prior=prior_list['help_id']==masterlist[idxcatalog][ind][np.argmin(d2d[ind])]['help_id'] #if its not in prior", "== prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])])) #prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None #", "print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+", "prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess", "list if new_Table is not None: prior_list=construct_prior(vstack([t,new_Table])) else: prior_list = construct_prior(t) if alt_model==True:", "sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty # # <NAME> defines a", "& (np.tril(dist_matrix)>0) xx,yy=np.meshgrid(np.arange(0,len(prior_list)),np.arange(0,len(prior_list))) yy[ind] # In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask", "a median and a hierarchical bayes combination redshift. We need uncertianty so lets", "calculate how muich each source contributes to each pixel. 
This process provides what", "the prior classes need to take the PSF and calculate how muich each", "and Uncertianty # # <NAME> defines a median and a hierarchical bayes combination", "ii=0 for i in range(0,len(prior_list)): ind=photoz['help_id'] == prior_list['help_id'][i] try: if photoz['z1_median'][ind]>0.0: prior_list['redshift'][i]=photoz['z1_median'][ind] prior_list['redshift_unc'][i]=np.max(np.array([np.abs(photoz['z1_median'][ind]-photoz['z1_min'][ind]),np.abs(photoz['z1_max'][ind]-photoz['z1_median'][ind])]))", "= Table.read('/Volumes/pdh_storage/cigale/out//models-block-0.fits') # set more appropriate units for dust from astropy.constants import L_sun,", "primary header and map header and can be set with a MOC. It", "each pixel. This process provides what we call a pointing matrix. Lets calculate", "combination redshift. We need uncertianty so lets match via `help_id` # In[26]: photoz=Table.read(redshift_file[0])", "astropy import units as u import os import pymoc from astropy import wcs", "Here we use HELP's VO database and directly call it using PyVO #", "if category=='posterior': d=data.posterior else: d=data.prior subsample = np.random.choice(d.chain.size * d.draw.size, size=nsamp,replace=False) agn =", "str(prior250.nsrc)+' sources \\n') print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')", "focus on sources in [Rowan-Robinson et al. 
2018](https://arxiv.org/abs/1704.07783) and claimed to have a", "(in arcseconds) hdulist.close() #-----500------------- hdulist = fits.open(plwfits) im500phdu=hdulist[0].header im500hdu=hdulist[1].header im500=hdulist[1].data*1.0E3 #convert to mJy", "calculate the pointing matrix for each prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix()", "sfr = d.sfr.values.reshape(d.chain.size * d.draw.size, d.src.size)[subsample, :] fin = open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\",", "# In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3", "#convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size", "to Extreme Starbursts # In this notebook, we read in the data files", "what we call a pointing matrix. 
Lets calculate the pointing matrix for each", "$> 10^{3}\\mathrm{M_{\\odot}yr^{-1}}$ # In[2]: def process_prior(c,new_Table=None, path_to_data=['../../../data/'], field=['Lockman-SWIRE'], path_to_SPIRE=['/Volumes/pdh_storage/dmu_products/dmu19/dmu19_HELP-SPIRE-maps/data/'], redshift_file=[\"/Volumes/pdh_storage/dmu_products/dmu24/dmu24_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised_UPDATED_IDs_20180219.fits\"], redshift_prior=[0.1,2.0], radius=6.0, alt_model=False):", "import SkyCoord from astropy import units as u import os import pymoc from", "sep = 18 separation = c.separation(SkyCoord(prior_list['ra'], prior_list['dec'])).arcsec remove_ind = (separation > np.min(separation)) &", "= vo.dal.TAPService(\"https://herschel-vos.phys.sussex.ac.uk/__system__/tap/run/tap\") # In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE", "to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist", "#prior_list['redshift_unc'].mask[i]=False #prior_list['redshift'].mask[i]=False except ValueError: None # In[33]: dist_matrix=np.zeros((len(prior_list),len(prior_list))) from astropy.coordinates import SkyCoord from", "appropriate units for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] /", "if i>j: coord1 = SkyCoord(ra=prior_list['ra'][i]*u.deg,dec=prior_list['dec'][i]*u.deg,frame='icrs') coord2=SkyCoord(ra=prior_list['ra'][j]*u.deg,dec=prior_list['dec'][j]*u.deg) dist_matrix[i,j] = coord1.separation(coord2).value # In[35]: ind=(np.tril(dist_matrix)<1.0/3600.0) &", "src #if there are matches if ind.sum() >0: #choose the closest and check", "Table import arviz as az from astropy.coordinates import SkyCoord from astropy import units", "(in pixel scale) prior350.set_prf(prf350.array,pind350,pind350) prior500.set_prf(prf500.array,pind500,pind500) 
print('fitting '+ str(prior250.nsrc)+' sources \\n') print('using ' +", "we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have", "to XID+. Here we focus on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and", "service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\", 0.028 ))\")", "# In[11]: resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra,", "< sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty # # <NAME> defines", "im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert to mJy nim250=hdulist[3].data*1.0E3 #convert to mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0])", "Order Coverage (MOC) maps for cutting down maps and catalogues so they cover", "mJy w_250 = wcs.WCS(hdulist[1].header) pixsize250=np.abs(3600.0*w_250.wcs.cdelt[0]) #pixel size (in arcseconds) hdulist.close() #-----350------------- hdulist =", "filenames # In[5]: #Folder containing maps pswfits=path_to_SPIRE[0]+'{}_SPIRE250_v1.0.fits'.format(field[0])#SPIRE 250 map pmwfits=path_to_SPIRE[0]+'{}_SPIRE350_v1.0.fits'.format(field[0])#SPIRE 350 map plwfits=path_to_SPIRE[0]+'{}_SPIRE500_v1.0.fits'.format(field[0])#SPIRE", "Set PSF. For SPIRE, the PSF can be assumed to be Gaussian with", "matrix. 
Lets calculate the pointing matrix for each prior class # In[43]: prior250.get_pointing_matrix()", "np import xidplus # # Applying XID+CIGALE to Extreme Starbursts # In this", "prior class # In[43]: prior250.get_pointing_matrix() prior350.get_pointing_matrix() prior500.get_pointing_matrix() # In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data,", "open(\"/Volumes/pdh_storage/cigale/pcigale_orig.ini\") fout = open(\"/Volumes/pdh_storage/cigale/pcigale.ini\", \"wt\") for line in fin: if 'redshift =' in", "for dust from astropy.constants import L_sun, M_sun SEDs['dust.luminosity'] = SEDs['dust.luminosity'] / L_sun.value SEDs['dust.mass']", "& (separation < sep) prior_list.remove_rows(remove_ind) # ## Get Redshift and Uncertianty # #", "on sources in [Rowan-Robinson et al. 2018](https://arxiv.org/abs/1704.07783) and claimed to have a star", "In[36]: prior_list[yy[ind]] # In[37]: prior_list['redshift'].mask[yy[ind]]=True # In[38]: prior_list=prior_list[prior_list['redshift'].mask == False] # In[39]: prior_list", "catalogue you want to fit (and make any cuts). Here we use HELP's", "construct_prior(Table=None): from astropy.coordinates import SkyCoord #first use standard cut (i.e. not star and", "median and a hierarchical bayes combination redshift. We need uncertianty so lets match", "A prior and posterior class. 
There should be a prior class for each", "In[44]: return [prior250,prior350,prior500],prior_list def getSEDs(data, src, nsamp=30,category='posterior'): import subprocess if category=='posterior': d=data.posterior else:", "= fits.open(pmwfits) im350phdu=hdulist[0].header im350hdu=hdulist[1].header im350=hdulist[1].data*1.0E3 #convert to mJy nim350=hdulist[3].data*1.0E3 #convert to mJy w_350", "resultset = service.search(\"SELECT TOP 10000 * FROM herschelhelp.main WHERE 1=CONTAINS(POINT('ICRS', ra, dec),CIRCLE('ICRS',\"+str(c.ra.deg[0])+\", \"+str(c.dec.deg[0])+\",", "and WCS information # In[6]: #-----250------------- hdulist = fits.open(pswfits) im250phdu=hdulist[0].header im250hdu=hdulist[1].header im250=hdulist[1].data*1.0E3 #convert", "photoz['help_id'][i]=str(photoz['help_id'][i].strip()).encode('utf-8') #photoz['help_id']=help_id # In[28]: from astropy.table import Column, MaskedColumn prior_list['redshift']=MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[0]),mask=[False]*len(prior_list)) prior_list.add_column(MaskedColumn(np.full((len(prior_list)),fill_value=redshift_prior[1]),mask=[False]*len(prior_list),name='redshift_unc')) # In[29]:", "call a pointing matrix. Lets calculate the pointing matrix for each prior class" ]
[]
[ "response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken has", "terms of the License. All rights not expressly granted by the License are", "print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] =", "agreed to separately in writing, software distributed under the License is distributed on", "You may obtain a copy of the License at https://developer.cisco.com/docs/licenses All use of", "} response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200 or response.status_code", "Token: ' + access_token + ' Refresh token: ' + refresh_token) else :", "{ \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url =", ": update_Admin('Hello, the cronjob was not able to generate new access tocken for", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \"\"\"", "for the new-employee-onboarding integration. Access Token: ' + access_token + ' Refresh token:", "License. All rights not expressly granted by the License are reserved. Unless required", "rights not expressly granted by the License are reserved. Unless required by applicable", "granted by the License are reserved. Unless required by applicable law or agreed", "= refresh_token update_Admin('Hello, New access tocken has been generated for the new-employee-onboarding integration.", "is the newly generated token # { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599,", "or implied. 
\"\"\" import os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization':", "may obtain a copy of the License at https://developer.cisco.com/docs/licenses All use of the", "s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type':", "= \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response =", "access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token", "the new-employee-onboarding integration. Here is the response code : ' + str(response.status_code) +", "separately in writing, software distributed under the License is distributed on an \"AS", "f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data)", "url, headers=headers, data=payload) if response.status_code == 200 or response.status_code == 204 : print(response.text)", "reserved. 
Unless required by applicable law or agreed to separately in writing, software", "# \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('*** Ran", "been generated for the new-employee-onboarding integration. Access Token: ' + access_token + '", "\"\"\"Copyright (c) 2020 Cisco and/or its affiliates. This software is licensed to you", "has been generated for the new-employee-onboarding integration. Access Token: ' + access_token +", "s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\"", "access_token + ' Refresh token: ' + refresh_token) else : update_Admin('Hello, the cronjob", "generate new access tocken for the new-employee-onboarding integration. Here is the response code", "in accordance with the terms of the License. All rights not expressly granted", "able to generate new access tocken for the new-employee-onboarding integration. Here is the", "response text below. access_token is the newly generated token # { # \"access_token\":", "must be in accordance with the terms of the License. All rights not", "are reserved. Unless required by applicable law or agreed to separately in writing,", "KIND, either express or implied. \"\"\" import os, requests def update_Admin(messageToAdmin): s =", "the License at https://developer.cisco.com/docs/licenses All use of the material herein must be in", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload)", "the terms of the Cisco Sample Code License, Version 1.1 (the \"License\"). You", "integration. 
Here is the response code : ' + str(response.status_code) + '.') #", "# samlpe response text below. access_token is the newly generated token # {", "\"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # }", "be in accordance with the terms of the License. All rights not expressly", "copy of the License at https://developer.cisco.com/docs/licenses All use of the material herein must", "the response code : ' + str(response.status_code) + '.') # samlpe response text", "herein must be in accordance with the terms of the License. All rights", "at https://developer.cisco.com/docs/licenses All use of the material herein must be in accordance with", "material herein must be in accordance with the terms of the License. All", ": ' + str(response.status_code) + '.') # samlpe response text below. access_token is", "or response.status_code == 204 : print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token)", "License, Version 1.1 (the \"License\"). You may obtain a copy of the License", "} resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers", "response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New", "integration. Access Token: ' + access_token + ' Refresh token: ' + refresh_token)", "by the License are reserved. 
Unless required by applicable law or agreed to", "requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200 or response.status_code == 204 :", "either express or implied. \"\"\" import os, requests def update_Admin(messageToAdmin): s = requests.Session()", "is licensed to you under the terms of the Cisco Sample Code License,", "for the new-employee-onboarding integration. Here is the response code : ' + str(response.status_code)", "Here is the response code : ' + str(response.status_code) + '.') # samlpe", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL", "# \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 #", "os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN']", "else : update_Admin('Hello, the cronjob was not able to generate new access tocken", "+ '.') # samlpe response text below. access_token is the newly generated token", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "Refresh token: ' + refresh_token) else : update_Admin('Hello, the cronjob was not able", "token # { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", #", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "tocken for the new-employee-onboarding integration. 
Here is the response code : ' +", "cronjob was not able to generate new access tocken for the new-employee-onboarding integration.", "os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken has been generated for the new-employee-onboarding", "# \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('*** Ran the cron script", "New access tocken has been generated for the new-employee-onboarding integration. Access Token: '", "expressly granted by the License are reserved. Unless required by applicable law or", "headers=headers, data=payload) if response.status_code == 200 or response.status_code == 204 : print(response.text) access_token", "generated for the new-employee-onboarding integration. Access Token: ' + access_token + ' Refresh", "new-employee-onboarding integration. Access Token: ' + access_token + ' Refresh token: ' +", "200 or response.status_code == 204 : print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token']", "}) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\":", "<gh_stars>1-10 \"\"\"Copyright (c) 2020 Cisco and/or its affiliates. This software is licensed to", "Cisco and/or its affiliates. This software is licensed to you under the terms", "access tocken for the new-employee-onboarding integration. 
Here is the response code : '", "Unless required by applicable law or agreed to separately in writing, software distributed", "os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\",", "access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken has been generated for the", "# { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\":", "not able to generate new access tocken for the new-employee-onboarding integration. Here is", "\"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp", "+ str(response.status_code) + '.') # samlpe response text below. access_token is the newly", "== 204 : print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] =", "+ refresh_token) else : update_Admin('Hello, the cronjob was not able to generate new", "os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken has been generated", "'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200", "the newly generated token # { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, #", "json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] 
headers = { 'Content-Type': 'application/x-www-form-urlencoded'", "= requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200 or response.status_code == 204", "Code License, Version 1.1 (the \"License\"). You may obtain a copy of the", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello,", "'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200 or", "below. access_token is the newly generated token # { # \"access_token\": \"<KEY>\", #", "+ access_token + ' Refresh token: ' + refresh_token) else : update_Admin('Hello, the", "url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response", "the new-employee-onboarding integration. Access Token: ' + access_token + ' Refresh token: '", "' + refresh_token) else : update_Admin('Hello, the cronjob was not able to generate", "response code : ' + str(response.status_code) + '.') # samlpe response text below.", "and/or its affiliates. This software is licensed to you under the terms of", "its affiliates. This software is licensed to you under the terms of the", "refresh_token update_Admin('Hello, New access tocken has been generated for the new-employee-onboarding integration. Access", "implied. 
\"\"\" import os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer", "access_token is the newly generated token # { # \"access_token\": \"<KEY>\", # \"expires_in\":", "was not able to generate new access tocken for the new-employee-onboarding integration. Here", "under the terms of the Cisco Sample Code License, Version 1.1 (the \"License\").", "of the Cisco Sample Code License, Version 1.1 (the \"License\"). You may obtain", "law or agreed to separately in writing, software distributed under the License is", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "Version 1.1 (the \"License\"). You may obtain a copy of the License at", "License at https://developer.cisco.com/docs/licenses All use of the material herein must be in accordance", "update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL =", "data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status()", "Sample Code License, Version 1.1 (the \"License\"). You may obtain a copy of", "' Refresh token: ' + refresh_token) else : update_Admin('Hello, the cronjob was not", "use of the material herein must be in accordance with the terms of", "the cronjob was not able to generate new access tocken for the new-employee-onboarding", "\"\"\" import os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \"", "tocken has been generated for the new-employee-onboarding integration. 
Access Token: ' + access_token", "newly generated token # { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\":", "{ # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474", "\"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('*** Ran the cron script ***')", "by applicable law or agreed to separately in writing, software distributed under the", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \"\"\" import", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "the License. All rights not expressly granted by the License are reserved. Unless", "resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' }", "new-employee-onboarding integration. Here is the response code : ' + str(response.status_code) + '.')", "is the response code : ' + str(response.status_code) + '.') # samlpe response", "1.1 (the \"License\"). You may obtain a copy of the License at https://developer.cisco.com/docs/licenses", "the License are reserved. 
Unless required by applicable law or agreed to separately", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers =", "data=payload) if response.status_code == 200 or response.status_code == 204 : print(response.text) access_token =", "' + str(response.status_code) + '.') # samlpe response text below. access_token is the", "the terms of the License. All rights not expressly granted by the License", "refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access", "== 200 or response.status_code == 204 : print(response.text) access_token = response.json()['access_token'] refresh_token =", "a copy of the License at https://developer.cisco.com/docs/licenses All use of the material herein", "accordance with the terms of the License. All rights not expressly granted by", "software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "str(response.status_code) + '.') # samlpe response text below. access_token is the newly generated", "code : ' + str(response.status_code) + '.') # samlpe response text below. 
access_token", "= requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url", "\"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('***", "2020 Cisco and/or its affiliates. This software is licensed to you under the", "of the material herein must be in accordance with the terms of the", "= s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = {", "the material herein must be in accordance with the terms of the License.", "you under the terms of the Cisco Sample Code License, Version 1.1 (the", "response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code == 200 or response.status_code ==", "text below. access_token is the newly generated token # { # \"access_token\": \"<KEY>\",", "CONDITIONS OF ANY KIND, either express or implied. \"\"\" import os, requests def", "= response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken", "OF ANY KIND, either express or implied. 
\"\"\" import os, requests def update_Admin(messageToAdmin):", "software is licensed to you under the terms of the Cisco Sample Code", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "import os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" +", "\"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\"", "the Cisco Sample Code License, Version 1.1 (the \"License\"). You may obtain a", "payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url,", "to separately in writing, software distributed under the License is distributed on an", "https://developer.cisco.com/docs/licenses All use of the material herein must be in accordance with the", "This software is licensed to you under the terms of the Cisco Sample", "1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('*** Ran the cron", "update_Admin('Hello, New access tocken has been generated for the new-employee-onboarding integration. Access Token:", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "of the License. All rights not expressly granted by the License are reserved.", "OR CONDITIONS OF ANY KIND, either express or implied. \"\"\" import os, requests", "ANY KIND, either express or implied. \"\"\" import os, requests def update_Admin(messageToAdmin): s", "express or implied. 
\"\"\" import os, requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({", "All use of the material herein must be in accordance with the terms", "token: ' + refresh_token) else : update_Admin('Hello, the cronjob was not able to", "(c) 2020 Cisco and/or its affiliates. This software is licensed to you under", "(the \"License\"). You may obtain a copy of the License at https://developer.cisco.com/docs/licenses All", "of the License at https://developer.cisco.com/docs/licenses All use of the material herein must be", "+ os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\":", "requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url =", "\"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' +", "+ os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers,", "samlpe response text below. access_token is the newly generated token # { #", "WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin,", "All rights not expressly granted by the License are reserved. 
Unless required by", "messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + os.environ['WEBEX_REFRESH_TOKEN']", "+ ' Refresh token: ' + refresh_token) else : update_Admin('Hello, the cronjob was", "= \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, }", "affiliates. This software is licensed to you under the terms of the Cisco", "\"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data =", "access tocken has been generated for the new-employee-onboarding integration. Access Token: ' +", "= { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url", "obtain a copy of the License at https://developer.cisco.com/docs/licenses All use of the material", "{ 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code ==", "refresh_token) else : update_Admin('Hello, the cronjob was not able to generate new access", "requests def update_Admin(messageToAdmin): s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] })", "licensed to you under the terms of the Cisco Sample Code License, Version", "in writing, software distributed under the License is distributed on an \"AS IS\"", "\"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token=' + 
os.environ['WEBEX_REFRESH_TOKEN'] headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\",", "to generate new access tocken for the new-employee-onboarding integration. Here is the response", "Cisco Sample Code License, Version 1.1 (the \"License\"). You may obtain a copy", "not expressly granted by the License are reserved. Unless required by applicable law", "License are reserved. Unless required by applicable law or agreed to separately in", "\"License\"). You may obtain a copy of the License at https://developer.cisco.com/docs/licenses All use", "with the terms of the License. All rights not expressly granted by the", "writing, software distributed under the License is distributed on an \"AS IS\" BASIS,", "'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data", "' + access_token + ' Refresh token: ' + refresh_token) else : update_Admin('Hello,", "required by applicable law or agreed to separately in writing, software distributed under", "response.status_code == 204 : print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"]", "terms of the Cisco Sample Code License, Version 1.1 (the \"License\"). You may", "'.') # samlpe response text below. 
access_token is the newly generated token #", "headers = { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload) if", "to you under the terms of the Cisco Sample Code License, Version 1.1", ": print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"]", "update_Admin('Hello, the cronjob was not able to generate new access tocken for the", "if response.status_code == 200 or response.status_code == 204 : print(response.text) access_token = response.json()['access_token']", "generated token # { # \"access_token\": \"<KEY>\", # \"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\",", "applicable law or agreed to separately in writing, software distributed under the License", "\"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url, json=data) resp.raise_for_status() url = \"https://webexapis.com/v1/access_token\" payload='grant_type=refresh_token&client_id=C91eb42821df0d8d205d90cd27cedfb004eeff6b787054578647f1baa7eb70868&client_secret=8ad397e278d9f6392739fb109da69d9bf35fb1dfc9b40ff66fdb9c8dc4e2d2dc&refresh_token='", "= { 'Content-Type': 'application/x-www-form-urlencoded' } response = requests.request(\"POST\", url, headers=headers, data=payload) if response.status_code", "\"expires_in\": 1209599, # \"refresh_token\": \"OTVmNjFiZjEtYmM1ZS00NWU4LWJmOWUtMTBkMDlkMGM2NTI3YWM3YjI3MzEtODNk_P0A1_36252b39-4c39-48c5-933f-afa3bbc77901\", # \"refresh_token_expires_in\": 6435474 # } print('*** Ran the", "\" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\" url = f\"{WEBEX_BASE_URL}/v1/messages\" data = {", "print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = 
refresh_token update_Admin('Hello, New access tocken has been", "= access_token os.environ[\"WEBEX_REFRESH_TOKEN\"] = refresh_token update_Admin('Hello, New access tocken has been generated for", "url = f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp =", "204 : print(response.text) access_token = response.json()['access_token'] refresh_token = response.json()['refresh_token'] print(access_token) os.environ[\"WEBEX_ACCESS_TOKEN\"] = access_token", "new access tocken for the new-employee-onboarding integration. Here is the response code :", "response.status_code == 200 or response.status_code == 204 : print(response.text) access_token = response.json()['access_token'] refresh_token", "= f\"{WEBEX_BASE_URL}/v1/messages\" data = { \"toPersonEmail\": \"<EMAIL>\", \"text\": messageToAdmin, } resp = s.post(url,", "s = requests.Session() s.headers.update({ 'Authorization': \"Bearer \" + os.environ['WEBEX_BOT_TOKEN'] }) WEBEX_BASE_URL = \"https://webexapis.com\"", "Access Token: ' + access_token + ' Refresh token: ' + refresh_token) else", "or agreed to separately in writing, software distributed under the License is distributed", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \"\"\" import os," ]
[ "exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for the", "exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] =", "in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data", "2) & (np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)]) >", "val > limits[1] def max_repeat(s): \"\"\"Maximumum number of times the same value repeats", "info): \"\"\"Package trial information for the remote.\"\"\" t_info, _ = info return t_info.to_json()", "This counter is reset at beginning of frame_range # so it should correspond", "m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s) for", "exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color", "np.concatenate([ np.arange(c) + 1 for c in pulse_count ]) n_pulses = pulse_count.sum() #", "else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback,", "check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if", "= exp.clock.getTime() - blink_pad_start # Show each frame of the stimulus for frame", "exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\",", "- exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] =", 
"pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() #", "t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\",", "exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp, center,", "reorder the columns so they are more intuitively organized? return trial_info, pulse_info #", "trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial", "= [] for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size,", "exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True)", "constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks -", "i, dtype=np.int) for i, c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c)", "= np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc,", "dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv,", "s = pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def", "rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use", "exp.trial_data] pulse_data = [p_data for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname", "people have right when # they come out of the blink (according to", "t_info.result, 
t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial interval", "candidate from this sample a = uniform(0, 2 * np.pi) r = uniform(radius,", "of the blink (according to Eyelink?) # TODO can we make life easier", "= exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\")", "exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info,", "stimulus to a side stim_pos = np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos)", "n_pulses = pulse_count.sum() # Assign gaps between pulses run_duration = np.inf while not_in_range(run_duration,", "the blink (according to Eyelink?) # TODO can we make life easier later", "dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # ---", "trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information", "frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix:", "prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ])", "gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the first random sample if we're", "Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\",", "of trials, might be only part of a run.\"\"\" if rng is None:", "flip_time 
t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for p, info in p_info.iterrows():", "\"targets\"]) # Record the time of first flip as the offset of the", "exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10", "fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm,", "+ run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%}", "in_array and in_ring: # Accept the candidate samples.append((x, y)) queue.append((x, y)) break if", "exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size,", "= exp.s.pattern.array.phases # Check if the eye is blinking and possibly wait a", "size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for xy_i", "start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time,", "def run_trial(exp, info): \"\"\"Function that executes what happens in each trial.\"\"\" t_info, p_info", "on=\"trial\") # TODO reorder the columns so they are more intuitively organized? 
return", "** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos]", "+= len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True)", "for the remote.\"\"\" t_info, _ = info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise", "truncated geometric distribution.\"\"\" a, b = min(support) - 1, max(support) dist = stats.geom(p=p,", "pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust", "data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem", "exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings,", "\"\"\"Add extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1,", "np.inf while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist =", "know will work start = 0, 0 samples = [start] queue = [start]", "possibly wait a bit if so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout):", "run.\"\"\" if rng is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run # ---", "Generate a candidate from this sample a = uniform(0, 2 * np.pi) r", "the last pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response", "randint = rng.randint # Start at a fixed point we know will work", "# ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = 
exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame,", "np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, ))", "code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\",", "for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info", "exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time", "= not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter is reset at", "not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return", "in sequence.\"\"\" s = pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() +", "(maybe because the info df isn't seeded?) 
# p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases #", "+ 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b", "exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc", "accept the candidate in_array = (np.abs(x) < size / 2) & (np.abs(y) <", "return trial_info, pulse_info # --- Support functions for block generation def not_in_range(val, limits):", "get a good solution for saving these # Currently it errors out (maybe", "1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in", "= 0, 0 samples = [start] queue = [start] while queue: # Pick", "design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks", "\"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in [0, 1]:", "s0, s1 = p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1", "defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus", "= len(lines) height = .5 heights = (np.arange(n)[::-1] - (n / 2 -", "return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"]", "exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play()", "to accept the candidate in_array = (np.abs(x) < size / 2) & (np.abs(y)", "prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct", "/ 2) in_ring = 
np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and in_ring:", "report of their performance.\"\"\" lines = [\"End of the run!\"] prior_trials = prior_correct", "= l1 - l0 return llr # --- Exeperiment execution def run_trial(exp, info):", "in exp.trial_data] pulse_data = [p_data for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data)", "# Pick a sample to expand from s_idx = randint(len(queue)) s_x, s_y =", "1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n =", "now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if", "pd.DataFrame([t for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return", "each trial for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield", "1) == candidates: # We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class", "at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\"", "break # --- Build the trial_info structure trial = np.arange(1, n_trials + 1)", ") all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add", "make more general and move into visigoth # TODO currently assumes square array", "\"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims,", "t_info, _ = info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" #", "p.keep_on_time: break # --- Build the trial_info structure 
trial = np.arange(1, n_trials +", "# --- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\")", "= exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for", "> limits[1] def max_repeat(s): \"\"\"Maximumum number of times the same value repeats in", "pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr,", "rows = pulse_dist == i n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng,", "m, s) for pat, c in zip(patterns, cs): pat.contrast = c def draw(self):", "the first random sample if we're not being precise # about the overall", "info.\"\"\" # TODO let us set random number generator somehow. Command line? #", "\"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each frame of the stimulus for", "draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] =", "for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\",", "n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(),", "blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"])", "__future__ import division import os import json from glob import glob import numpy", "= exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, 
iti_duration=t_info.wait_iti) #", "os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in", "the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist,", "# TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng is None:", "i in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials)", "this sample a = uniform(0, 2 * np.pi) r = uniform(radius, 2 *", "t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res", ") # Add trial-level information computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False)", "= exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if", "exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want to wait a", "cs = 10 ** stats.norm.ppf(qs, m, s) for pat, c in zip(patterns, cs):", "\"\", \"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct = np.average([prior_correct,", "flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be", "all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols} ) all_pulses = all_pulses.assign(", "trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so they are more intuitively organized?", "= pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean()", "AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = 
exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None:", "queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf =", "draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions", "in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd = np.inf expected_acc = np.inf", "# This counter is reset at beginning of frame_range # so it should", "\"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols}", "if run_correct is not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if", "based on contrast and generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1 =", "** log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim", "# ~~~ Response period # Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] =", "p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n = rows.sum() log_contrast[rows] = flexible_values(dist,", "all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information that's not part of", "general and move into visigoth # TODO currently assumes square array # See", "run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information", "exp.p) for i in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"]", "and in_ring: # Accept the candidate samples.append((x, y)) queue.append((x, y)) break if (i", "= prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct),", "(i.e. 
in psychophys rig) if not p.keep_on_time: break # --- Build the trial_info", "Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color =", "stimulus? How much vision do people have right when # they come out", "res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] =", "to frames dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in", "log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for each trial for trial,", "pulse_info # --- Support functions for block generation def not_in_range(val, limits): \"\"\"False if", "= 0 total_iti = wait_iti.sum() # Use the first random sample if we're", "good solution for saving these # Currently it errors out (maybe because the", "\"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"]", "the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler =", "= len(patterns) qs = np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs", "fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for xy_i in xy: pattern =", "(np.arange(n)[::-1] - (n / 2 - .5)) * height for line, y in", "in range(candidates): # Generate a candidate from this sample a = uniform(0, 2", "as pd from scipy import stats from scipy.spatial import distance from psychopy.visual import", "on contrast and generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1 = p.dist_sds", "stim_size) xy[:, 0] += center[0] xy[:, 1] += center[1] self.box = Rect(exp.win, size", "- l0 return 
llr # --- Exeperiment execution def run_trial(exp, info): \"\"\"Function that", "# Assign gaps between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim =", "the run!\"] prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir,", "the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color", "type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix", "def compute_llr(c, p): \"\"\"Signed LLR of pulse based on contrast and generating distributions.\"\"\"", "l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 return llr # ---", "= np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to each trial total_iti =", "= wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr,", "frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\",", "= \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result,", "n, i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd = np.inf expected_acc", "return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation breaks", "attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial", "be only part of a run.\"\"\" if rng is None: rng = np.random.RandomState()", "TODO Track fixation breaks 
here? Also in the remote? if self.trial_data: data =", "parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def", "all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, )", "exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\",", "trial_duration=trial_duration, start_time=start_time, ) # Generate information for each trial for trial, trial_info in", "interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True,", "return locals() def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" # TODO let", "trial and pulse train info.\"\"\" # TODO let us set random number generator", "# Start at a fixed point we know will work start = 0,", "np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the first random", "+ stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = []", "disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data, _ in exp.trial_data]", "Use the first random sample if we're not being precise # about the", "~~~ Stimulus period for p, info in p_info.iterrows(): # Allow aborts in the", "exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find", "= np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and in_ring: # Accept the", 
"all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) #", "numpy as np import pandas as pd from scipy import stats from scipy.spatial", "def not_in_range(val, limits): \"\"\"False if val is outside of limits.\"\"\" return val <", "TODO do we want to wait a smidge if they were blinking before", "in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints, p,", "= \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n = rows.sum() log_contrast[rows]", "they were blinking before # showing the stimulus? How much vision do people", "# Check if the eye is blinking and possibly wait a bit if", "run!\"] prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\"))", "= Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1):", "dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p):", "frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast", "rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make more general and move", "balanced set of trials, might be only part of a run.\"\"\" if rng", "< size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array", "out until we get a good solution for saving these # Currently it", "subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information that's not part of the", "fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols} ) all_pulses = all_pulses.assign( 
occurred=False,", "exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] +=", "targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating", "do we want to wait a smidge if they were blinking before #", "rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur", "2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure", "= len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([", "= data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else: return None, None def", "= target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc", "exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp,", "1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of first flip", "for c in pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps between pulses", "__init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size", "in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else: return", "trial for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info,", "Add in name information that matches across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session,", "\"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() 
exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response)", "rng.uniform randint = rng.randint # Start at a fixed point we know will", "wait_iti[0] = 0 total_iti = wait_iti.sum() # Use the first random sample if", "\"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time =", "-1 * pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd", "for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i)", "so they are more intuitively organized? return trial_info, pulse_info # --- Support functions", "p_info.iterrows(): # Allow aborts in the middle of a trial exp.check_abort() # Update", "stim_pos = np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos =", "count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max", "not being precise # about the overall time of the run (i.e. in", "> radius) if in_array and in_ring: # Accept the candidate samples.append((x, y)) queue.append((x,", "rng = np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial components # Assign", "\"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\",", "the run (i.e. 
in psychophys rig) if not p.keep_on_time: break # --- Build", "= np.inf while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist", "= pulse_dist == i n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast)", "Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch", "--- Build the trial_info structure trial = np.arange(1, n_trials + 1) trial_info =", "return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of pulse", "for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name information", "Assign initial ITI to each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti", "fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- # def", "the run (i.e. in psychophys rig) if not p.keep_on_time: break # Assign pulse", "height = .5 heights = (np.arange(n)[::-1] - (n / 2 - .5)) *", "saving these # Currently it errors out (maybe because the info df isn't", "performance.\"\"\" lines = [\"End of the run!\"] prior_trials = prior_correct = 0 output_dir", "StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True)", "if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink", "(according to Eyelink?) 
# TODO can we make life easier later by updating", "mass given truncated geometric distribution.\"\"\" a, b = min(support) - 1, max(support) dist", "run=exp.p.run ) # Add in information that's not part of the saved design", "if (prior_trials + run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're", "Stimulus period for p, info in p_info.iterrows(): # Allow aborts in the middle", "import stats from scipy.spatial import distance from psychopy.visual import TextStim, Rect from visigoth.stimuli", "all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name information that matches", "flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter is", "Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist =", "def save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data =", "number of times the same value repeats in sequence.\"\"\" s = pd.Series(s) switch", "p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each frame of the stimulus", "= 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7,", "StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\",", "----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True)", "all_trials, all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1): trial_part, pulse_part", "got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): 
total_correct = np.average([prior_correct, run_correct], weights=[prior_trials,", "pulse based on contrast and generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1", "import distance from psychopy.visual import TextStim, Rect from visigoth.stimuli import Point, Points, PointCue,", "check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims,", "exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"],", "elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95, n)", "define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float)", "exp.p.acceleration # Add in name information that matches across tables all_trials = all_trials.assign(", ") # Add in information that's not part of the saved design gen_dist", "TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\"", "all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed", "experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i in", "]) llr_mean = np.inf llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range)", "> constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target to a side gen_dist", "Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler", "= 
all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols} ) all_pulses =", "if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period", "exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info,", "solution for saving these # Currently it errors out (maybe because the info", "Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def", "# We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self,", "exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\")", "exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s) for pat, c in zip(patterns,", "llr = l1 - l0 return llr # --- Exeperiment execution def run_trial(exp,", "Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating stimulus pattern =", "commenting out until we get a good solution for saving these # Currently", "number generator somehow. Command line? 
# Build the full experimental design constraints =", "out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with", "r = uniform(radius, 2 * radius) x, y = s_x + r *", "in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) + 1 for c in", "t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\")", "p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the eye is blinking and possibly", "2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts to", "p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p,", "do people have right when # they come out of the blink (according", "breaks here? Also in the remote? 
if self.trial_data: data = pd.DataFrame([t for t,", "for i in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] +=", "p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd =", "= [\"End of the run!\"] prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem)", "os import json from glob import glob import numpy as np import pandas", "total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct),", "trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints,", "target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 *", "of multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf,", "res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play()", "t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\",", "= np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i", "self.trial_data: data = pd.DataFrame([t for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses", "samples = [start] queue = [start] while queue: # Pick a sample to", "total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti:", "the remote.\"\"\" t_info, _ = info return 
t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance", "= exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not", "= uniform(0, 2 * np.pi) r = uniform(radius, 2 * radius) x, y", "a bit if so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation():", "+ \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # -----------------------------------------------------------------------", "np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs,", "counter is reset at beginning of frame_range # so it should correspond to", "np.concatenate([ np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1) ]) pulse =", "if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame", "= [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials =", "= exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\",", "# # Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color", "p_info.loc[p, \"blink\"] |= blink # This counter is reset at beginning of frame_range", "inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package", "come out of the blink (according to Eyelink?) 
# TODO can we make", "vision do people have right when # they come out of the blink", "Add trial-level information computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses =", "# Assign the stimulus to a side stim_pos = np.repeat([0, 1], n_trials //", "Start at a fixed point we know will work start = 0, 0", "= pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname", "= pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem +", "queue[s_idx] for i in range(candidates): # Generate a candidate from this sample a", "exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average", "is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime()", "expected_count_dist)) # Assign initial ITI to each trial total_iti = np.inf while not_in_range(total_iti,", "if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime()", "* np.cos(a), s_y + r * np.sin(a) # Check the three conditions to", "candidates: # We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def", "Generate information for each trial for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"]", "randint(len(queue)) s_x, s_y = queue[s_idx] for i in range(candidates): # Generate a candidate", "1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign", "= PointCue(exp.win, exp.p.cue_norm, 
exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius,", "max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) #", "= p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)),", "--- Assign trial components # Assign the stimulus to a side stim_pos =", "generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of trials, might be only part", "stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size) xy[:, 0] += center[0]", "\"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~", "t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for p, info in p_info.iterrows(): #", "Allow aborts in the middle of a trial exp.check_abort() # Update the pattern", "llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count)", "gen_dist = rng.permutation(gen_dist) # Assign pulse counts to each trial count_support = np.arange(p.pulse_count[-1],", "~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res =", "np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) #", "# Generate a candidate from this sample a = uniform(0, 2 * np.pi)", "only part of a run.\"\"\" if rng is None: rng = np.random.RandomState() n_trials", "+ 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error =", "+= 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time", "def __init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size =", "line, pos=(0, 
y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\" if", "target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields", "right when # they come out of the blink (according to Eyelink?) #", "np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n, i", "frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\"", "gap duration # information or are we just going to have to deal?", "showing the stimulus? How much vision do people have right when # they", "in p_info.iterrows(): # Allow aborts in the middle of a trial exp.check_abort() #", "np.random.RandomState() uniform = rng.uniform randint = rng.randint # Start at a fixed point", "1] += center[1] self.box = Rect(exp.win, size + stim_size, size + stim_size, pos=center,", "allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] =", "# Assign the target to a side gen_dist = np.repeat([0, 1], n_trials //", "import numpy as np import pandas as pd from scipy import stats from", "pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps between pulses run_duration = np.inf", "* n_pulses, ]) # Use the first random sample if we're not being", "for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] =", "= min(support) - 1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b)", "break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want to wait a smidge", "pulse_info def 
generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of trials, might be", "exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for", "to wait a smidge if they were blinking before # showing the stimulus?", "self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else: return None,", "will be filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\",", "\"\"\"Package trial information for the remote.\"\"\" t_info, _ = info return t_info.to_json() def", "trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum())", "if the eye is blinking and possibly wait a bit if so blink_pad_start", "pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed from pulse-level table all_trials =", "trial exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting", "TODO reorder the columns so they are more intuitively organized? return trial_info, pulse_info", "constraints.trials_per_run # --- Assign trial components # Assign the stimulus to a side", "last pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period", "run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct", "0, 0 samples = [start] queue = [start] while queue: # Pick a", "run (i.e. in psychophys rig) if not p.keep_on_time: break # Assign pulse intensities", "rng is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial", "organized? 
return trial_info, pulse_info # --- Support functions for block generation def not_in_range(val,", "s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given", "= exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\",", "positions using poisson-disc sampling.\"\"\" # TODO make more general and move into visigoth", "grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) )", "[0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame", "# ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res", "not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti", "= all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some components for training all_trials[\"wait_pre_stim\"]", "gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in", "matches across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign(", "total_iti = wait_iti.sum() # Use the first random sample if we're not being", "b = min(support) - 1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) /", "t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses", "trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, 
pulse_llr=pulse_llr, )) # --- Update the", "return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period", "random number generator somehow. Command line? # Build the full experimental design constraints", "compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation breaks here? Also in", "pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma", "0] += center[0] xy[:, 1] += center[1] self.box = Rect(exp.win, size + stim_size,", "period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames:", "np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build the", "return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report of", "we know will work start = 0, 0 samples = [start] queue =", "f in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct", "rng.permutation(stim_pos) # Assign the target to a side gen_dist = np.repeat([0, 1], n_trials", "stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components # Map from trial", "in [0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i", "# Add in blank fields that will be filled in later empty_cols =", "occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed from", "= stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed", "name information that matches across tables all_trials = all_trials.assign( 
subject=exp.p.subject, session=exp.p.session, run=exp.p.run )", "or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist =", "+= center[1] self.box = Rect(exp.win, size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color,", "compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd", "= [start] while queue: # Pick a sample to expand from s_idx =", "gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp,", "[p_data for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem +", "np.concatenate([ np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist) ]) llr_mean =", "exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color =", "stimulus objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial", "glob import glob import numpy as np import pandas as pd from scipy", "\"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct = np.average([prior_correct, run_correct],", "blinking before # showing the stimulus? 
How much vision do people have right", "trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials =", "& (np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius)", "responded=False, **{col: np.nan for col in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False,", "i in [0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist ==", "+ 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # ---", "(not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]:", "stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size,", "= Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius,", "exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\")", "in name information that matches across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run", "the columns so they are more intuitively organized? 
return trial_info, pulse_info # ---", "prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames])", "l0 return llr # --- Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes", "exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets", "= constraints.trials_per_run # --- Assign trial components # Assign the stimulus to a", "i, c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) + 1 for", "import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined at runtime.\"\"\"", "+ 1 for c in pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps", "= info # ~~~ Set trial-constant attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos]", "- 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials)", "y = s_x + r * np.cos(a), s_y + r * np.sin(a) #", "= [p_data for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem", "from scipy import stats from scipy.spatial import distance from psychopy.visual import TextStim, Rect", "expected_count_dist = count_pmf * n_trials count_error = np.inf while count_error > constraints.sum_count_error: pulse_count", "mean_acc, responses else: return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject", "until we get a good solution for saving these # Currently it errors", "switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability", "dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of pulse based on contrast and generating", "= trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error = np.inf while count_error", 
"not p.keep_on_time: break # Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast", "data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else: return None, None def show_performance(exp,", "cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos,", "\"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in", "sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95, n) m, s", "= np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap,", "poisson-disc sampling.\"\"\" # TODO make more general and move into visigoth # TODO", "run_correct is not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials", "= count_pmf * n_trials count_error = np.inf while count_error > constraints.sum_count_error: pulse_count =", "p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c))", "trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign(", "remote? 
if self.trial_data: data = pd.DataFrame([t for t, _ in self.trial_data]) mean_acc =", "# TODO do we want to wait a smidge if they were blinking", "trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\"))", "in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"]", "empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials", "run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur =", ".95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m,", "pulse train info.\"\"\" # TODO let us set random number generator somehow. Command", "class StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf * 2", "generator somehow. Command line? 
# Build the full experimental design constraints = Bunch(exp.p.design_constraints)", "\"cue\", \"targets\"]) # TODO do we want to wait a smidge if they", "in_array = (np.abs(x) < size / 2) & (np.abs(y) < size / 2)", "dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed from pulse-level table all_trials", "exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is", "sampling.\"\"\" # TODO make more general and move into visigoth # TODO currently", "if res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"]", ")) # --- Assign trial components # Map from trial to pulse trial", "y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output", "computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur =", "interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial", "c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) + 1 for c", "draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color", "generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses", "exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for", ") # Generate information for each trial for trial, trial_info in 
all_trials.iterrows(): pulse_info", "> constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts to each trial count_support", "xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n =", "xy = poisson_disc_sample(size, stim_size) xy[:, 0] += center[0] xy[:, 1] += center[1] self.box", "= exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size) xy[:, 0] += center[0] xy[:,", "# ----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\",", "AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to", "* np.sin(a) # Check the three conditions to accept the candidate in_array =", "exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] =", "n_pulses, ]) # Use the first random sample if we're not being precise", "flip_time # ~~~ Stimulus period for p, info in p_info.iterrows(): # Allow aborts", "- .5)) * height for line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0,", "in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want", "np.pi) r = uniform(radius, 2 * radius) x, y = s_x + r", "drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] +", "Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = 
exp.wait_until(AcquireFixation(exp),", "max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean", "count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf", "d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr", "the subject a report of their performance.\"\"\" lines = [\"End of the run!\"]", "pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95, n) m, s =", "dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if", "of a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() #", "columns so they are more intuitively organized? return trial_info, pulse_info # --- Support", "exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10", "pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean =", "out of the blink (according to Eyelink?) 
# TODO can we make life", "= np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0]", "def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" # TODO let us set", "**{col: np.nan for col in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan,", "= np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial", "= np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the first", "queue: # Pick a sample to expand from s_idx = randint(len(queue)) s_x, s_y", "sample to expand from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for i", "None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True)", "= np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build", "somehow. Command line? 
# Build the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials,", "a, b = min(support) - 1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support)", "= exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast", "all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level", "exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we get a good", "in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames =", "check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO", "blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed from pulse-level table", "llr # --- Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes what happens", "exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else:", "\"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col:", "filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\",", "rng.permutation(gen_dist) # Assign pulse counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) +", "l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 return llr #", "TODO let us set random number 
generator somehow. Command line? # Build the", "a candidate from this sample a = uniform(0, 2 * np.pi) r =", "+ r * np.cos(a), s_y + r * np.sin(a) # Check the three", "trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of trials, might", "exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames: if", "limits[1] def max_repeat(s): \"\"\"Maximumum number of times the same value repeats in sequence.\"\"\"", "\"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus", "\"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n = len(lines) height = .5", "all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name information that matches across tables all_trials", "pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we get a", "trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b = min(support) -", "pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns)", "responses = data[\"responded\"].sum() return mean_acc, responses else: return None, None def show_performance(exp, run_correct,", "distance from psychopy.visual import TextStim, Rect from visigoth.stimuli import Point, Points, PointCue, Pattern", "be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize", "in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"]", "/ 5 xy = poisson_disc_sample(size, 
stim_size) xy[:, 0] += center[0] xy[:, 1] +=", "= rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr", "1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def", "= os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f", "= \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time", "# Currently it errors out (maybe because the info df isn't seeded?) #", ") all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will be filled", "pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period #", "exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses =", "\"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col", "check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\",", "\"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so they", "while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target to a", "p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink #", "rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist))", "[] for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, 
elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf,", "constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration =", "want to wait a smidge if they were blinking before # showing the", "= flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr,", "Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self,", "feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color", "np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) #", "by updating the gap duration # information or are we just going to", "p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error", "the candidate in_array = (np.abs(x) < size / 2) & (np.abs(y) < size", "= pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct =", "heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height for line,", ".5 heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height for", "so it should correspond to frames dropped during the stim p_info.loc[p, \"dropped_frames\"] =", "rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() # Use the first", "run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n = len(lines) height", "flip_time # ~~~ Response period # Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"]", "http://bost.ocks.org/mike/algorithms/ if rng is None: 
rng = np.random.RandomState() uniform = rng.uniform randint =", "\"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos", "PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color)", "return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if", "t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation breaks here?", "See http://bost.ocks.org/mike/algorithms/ if rng is None: rng = np.random.RandomState() uniform = rng.uniform randint", "= exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4)", "\"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0,", "glob import numpy as np import pandas as pd from scipy import stats", "* pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd =", "n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p)", "log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"]", "tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session,", "is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start):", "run_trials): \"\"\"Show the subject a report of their performance.\"\"\" lines = [\"End of", "]) if (prior_trials + run_trials): total_correct 
= np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\",", "\"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This", "return llr # --- Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes what", "t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for p, info", "the offset of the last pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time", "\"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims =", "in each trial.\"\"\" t_info, p_info = info # ~~~ Set trial-constant attributes of", "exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True)", "for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp,", "the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True):", "t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"]", "candidate in_array = (np.abs(x) < size / 2) & (np.abs(y) < size /", "# Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue", "exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s) for pat, c in", "parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation", "n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() # Use the", 
"\"\"\"Generated a balanced set of trials, might be only part of a run.\"\"\"", "at {:.0%} correct today!\".format(total_correct), ]) n = len(lines) height = .5 heights =", "pulse_offset=np.nan, ) # Add trial-level information computed from pulse-level table all_trials = all_trials.set_index(\"trial\",", "= trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so they are more intuitively", "len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\",", "import glob import numpy as np import pandas as pd from scipy import", "= exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) #", "for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True)", "point we know will work start = 0, 0 samples = [start] queue", "= wait_iti.sum() # Use the first random sample if we're not being precise", "p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error = np.inf while count_error > constraints.sum_count_error:", "trial components # Map from trial to pulse trial = np.concatenate([ np.full(c, i,", "onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix,", "np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)):", "val < limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum number of times", "if so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\",", "~~~ Set trial-constant 
attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos]", "trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials =", "--- Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes what happens in each", "responses else: return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a", "elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95,", "(prior_trials + run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at", "exp.clock.getTime() - blink_pad_start # Show each frame of the stimulus for frame in", "components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name", "a sample to expand from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for", "isn't seeded?) 
# p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the eye is", "pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part,", "for pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims,", "while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0,", "We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp,", "\"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] =", "= [start] queue = [start] while queue: # Pick a sample to expand", "]) n_pulses = pulse_count.sum() # Assign gaps between pulses run_duration = np.inf while", "flip as the offset of the last pulse if not frame: p_info.loc[p, \"pulse_offset\"]", "offset of the last pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time #", "make life easier later by updating the gap duration # information or are", "= pulse_count.sum() # Assign gaps between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range):", "repeats in sequence.\"\"\" s = pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max()", "break if (i + 1) == candidates: # We've exhausted the particular sample", "between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng)", "design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist),", "]) n = len(lines) height = .5 
heights = (np.arange(n)[::-1] - (n /", "\"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1])", "queue.append((x, y)) break if (i + 1) == candidates: # We've exhausted the", "out (maybe because the info df isn't seeded?) # p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases", "def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius,", "target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2 * pulse_count) expected_acc =", "stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR", "exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size) xy[:,", "np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to each trial total_iti = np.inf", "* height for line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw()", "range(candidates): # Generate a candidate from this sample a = uniform(0, 2 *", "// 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts", "pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([", "np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf", "trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so they are more", "yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of trials,", "if (i + 1) == candidates: # We've exhausted the particular sample queue.pop(s_idx)", "exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos]", "= 
np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 **", "exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp,", "\"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n = rows.sum() log_contrast[rows] =", "frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] =", "= np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int)", "# information or are we just going to have to deal? p_info.loc[p, \"blink_pad\"]", "= exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix:", "constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1)", "and pulse train info.\"\"\" # TODO let us set random number generator somehow.", "= all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for each", "session=exp.p.session, run=exp.p.run ) # Add in information that's not part of the saved", "n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05,", "np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials", "exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] 
+ exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True)", "s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric", "glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials =", "switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a,", "2 stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size) xy[:, 0] +=", "exp.s.pattern.randomize_phases() # TODO commenting out until we get a good solution for saving", "trial-level information computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\")", "+ stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for xy_i in", "in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast =", "life easier later by updating the gap duration # information or are we", "the overall time of the run (i.e. 
in psychophys rig) if not p.keep_on_time:", "is blinking and possibly wait a bit if so blink_pad_start = exp.clock.getTime() for", "draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0],", "i n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast,", "exp.s.pattern.array.phases # Check if the eye is blinking and possibly wait a bit", "exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True)", "--- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum()", "have right when # they come out of the blink (according to Eyelink?)", "- (n / 2 - .5)) * height for line, y in zip(lines,", "from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for i in range(candidates): #", "len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing", "= np.concatenate([ np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1) ]) pulse", "the timing of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration", "d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr =", "wait_iti.sum() # Use the first random sample if we're not being precise #", "= all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a", "start = 0, 0 samples = [start] queue = [start] while queue: #", "= queue[s_idx] for i in range(candidates): # Generate a candidate from this sample", "exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), 
timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"]", "= np.random.RandomState() uniform = rng.uniform randint = rng.randint # Start at a fixed", "rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr)", "enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) + 1 for c in pulse_count", "\"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for", "the remote? if self.trial_data: data = pd.DataFrame([t for t, _ in self.trial_data]) mean_acc", "wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the first random sample", "exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame", "in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is", "- dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of pulse based on contrast and", "else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims)", "= np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ])", "= exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem +", "= pd.DataFrame([t for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum()", "0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play()", "flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, 
-1", "\"blink\"] |= blink # This counter is reset at beginning of frame_range #", "trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components # Map", "square array # See http://bost.ocks.org/mike/algorithms/ if rng is None: rng = np.random.RandomState() uniform", "the candidate samples.append((x, y)) queue.append((x, y)) break if (i + 1) == candidates:", "to expand from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for i in", "# ~~~ Stimulus period for p, info in p_info.iterrows(): # Allow aborts in", "stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for xy_i in xy:", ".5)) * height for line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y),", "pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # --- Update", "all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time =", "# ~~~ Set trial-constant attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos =", "Support functions for block generation def not_in_range(val, limits): \"\"\"False if val is outside", "qs = np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10", "- expected_count_dist)) # Assign initial ITI to each trial total_iti = np.inf while", "n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] -", "t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\" t_info, _", "overall time of the run (i.e. 
in psychophys rig) if not p.keep_on_time: break", "exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res", "= [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p,", "and generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1 = p.dist_sds d0, d1", "the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info):", "as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code #", "errors out (maybe because the info df isn't seeded?) # p_info.loc[p, \"phases\"] =", "trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part,", "are more intuitively organized? 
return trial_info, pulse_info # --- Support functions for block", "= flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for p, info in", "= .5 heights = (np.arange(n)[::-1] - (n / 2 - .5)) * height", "for i in range(candidates): # Generate a candidate from this sample a =", "+ pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur,", "import Point, Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch", "wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\")", "\"\"\"Show the subject a report of their performance.\"\"\" lines = [\"End of the", "a fixed point we know will work start = 0, 0 samples =", "not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist = \"norm\",", "0 total_iti = wait_iti.sum() # Use the first random sample if we're not", "the time of first flip as the offset of the last pulse if", "line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp):", "Response period # Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"]", "gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank", "executes what happens in each trial.\"\"\" t_info, p_info = info # ~~~ Set", "return val < limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum number of", "distribution.\"\"\" a, b = min(support) - 1, max(support) dist = stats.geom(p=p, loc=a) return", 
"self.patterns = patterns = [] for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings,", "flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] =", "[t_data for t_data, _ in exp.trial_data] pulse_data = [p_data for _, p_data in", "exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time:", "[-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"],", "[\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in [0,", "exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1]", "p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix:", "** 2 * pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info", "all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run", "Check the three conditions to accept the candidate in_array = (np.abs(x) < size", "= np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n, i in zip(pulse_count,", "draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\",", "trial information for the remote.\"\"\" t_info, _ = info return t_info.to_json() def compute_performance(self):", "exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) 
for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True):", "should correspond to frames dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for", "more general and move into visigoth # TODO currently assumes square array #", "elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield trial", "= all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration =", "if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] =", "given truncated geometric distribution.\"\"\" a, b = min(support) - 1, max(support) dist =", "t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time", "draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] +", "Assign the target to a side gen_dist = np.repeat([0, 1], n_trials // 2)", "have to deal? 
p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each frame", "check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] =", "range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] +=", "demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims =", "Map from trial to pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for i,", "[\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign(", "we want to wait a smidge if they were blinking before # showing", "of pulse based on contrast and generating distributions.\"\"\" m0, m1 = p.dist_means s0,", "indent=4) # ----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\",", "generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" # TODO let us set random", "the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\")", "+ trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate", "= flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses,", "= s != 
s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass", "** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0)", "= flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error =", "the stimulus to a side stim_pos = np.repeat([0, 1], n_trials // 2) while", "about the overall time of the run (i.e. in psychophys rig) if not", "part of a run.\"\"\" if rng is None: rng = np.random.RandomState() n_trials =", "def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b = min(support)", "radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make more general", "pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur", "Set trial-constant attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] #", "not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows", "+ \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname,", "samples.append((x, y)) queue.append((x, y)) break if (i + 1) == candidates: # We've", "full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i", "l1 - l0 return llr # --- Exeperiment execution def run_trial(exp, info): \"\"\"Function", ")) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info", "expected_acc = np.inf while 
(not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for", "pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum()", "= np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial components # Assign the", "today!\".format(total_correct), ]) n = len(lines) height = .5 heights = (np.arange(n)[::-1] - (n", "Add in information that's not part of the saved design gen_dist = all_trials[\"gen_dist\"]", "exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False)", "# --- Support functions for block generation def not_in_range(val, limits): \"\"\"False if val", "if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial", ".loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so", "# TODO commenting out until we get a good solution for saving these", "radius) x, y = s_x + r * np.cos(a), s_y + r *", "[(x, y)]) > radius) if in_array and in_ring: # Accept the candidate samples.append((x,", "trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") #", "stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for", "# Allow aborts in the middle of a trial exp.check_abort() # Update the", "sequence.\"\"\" s = pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1", "poisson_disc_sample(size, radius, 
candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make more", "= (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, )", "(i + 1) == candidates: # We've exhausted the particular sample queue.pop(s_idx) return", "exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 **", "from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser):", "correspond to frames dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame", "Adjust the timing of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /=", "wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the first random sample if", "to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data, _ in", "files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data, _", "the three conditions to accept the candidate in_array = (np.abs(x) < size /", "for each trial for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy()", "flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(),", "gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that", "trial = np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, 
gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int),", "pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error", "\"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b = min(support) - 1, max(support)", "trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur", "* pulse_count) expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info", "# --- Build the trial_info structure trial = np.arange(1, n_trials + 1) trial_info", "of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval", "\"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos =", "count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign", "Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in", "for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] =", "None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play()", "= all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run )", "training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # 
Add in name information that", "gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add", "if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want to wait", "# Average of multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask,", "let us set random number generator somehow. Command line? # Build the full", "draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color", "# Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now", "constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist", "\"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play()", "= StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\",", "is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial components", "all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will be filled in", "frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not", "zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files", "= patterns = [] for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex,", "components # Map from trial to pulse trial = np.concatenate([ np.full(c, i, 
dtype=np.int)", "sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield trial and pulse", "\"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False)", "\"\"\"Signed LLR of pulse based on contrast and generating distributions.\"\"\" m0, m1 =", "exp, center, dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size /", "all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for each trial", "exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\",", "generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints, exp.p)", "== trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set", "constraints.acc_range)): for i in [0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows =", "all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"]", "# Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info", "not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] = flip_time blink =", "\"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- #", "\"box_h\", \"box_l\"], check_abort=True) 
exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\",", "= np.concatenate([ np.arange(c) + 1 for c in pulse_count ]) n_pulses = pulse_count.sum()", "not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter is reset at beginning", "exp.p.target_color) # Average of multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex,", "yield_skipped=True) for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\")", "Accept the candidate samples.append((x, y)) queue.append((x, y)) break if (i + 1) ==", "# Add trial-level information computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses", "{:.0%} correct today!\".format(total_correct), ]) n = len(lines) height = .5 heights = (np.arange(n)[::-1]", "time of first flip as the offset of the last pulse if not", "\"phases\"] = exp.s.pattern.array.phases # Check if the eye is blinking and possibly wait", "heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to", "exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"])", "0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\",", "targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating stimulus", "Inter-trial interval exp.s.fix.color = 
exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\",", "exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial", "None: rng = np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial components #", "value repeats in sequence.\"\"\" s = pd.Series(s) switch = s != s.shift(1) return", "exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims,", "check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime()", "all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur)", "LLR of pulse based on contrast and generating distributions.\"\"\" m0, m1 = p.dist_means", "np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n", "all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information that's not", "2 - .5)) * height for line, y in zip(lines, heights): TextStim(exp.win, line,", "else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color", "\"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True 
p_info.loc[p,", "# Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist", "None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report of their performance.\"\"\"", "wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will be", "y)) queue.append((x, y)) break if (i + 1) == candidates: # We've exhausted", "n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) #", "currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng is None: rng =", "= randint(len(queue)) s_x, s_y = queue[s_idx] for i in range(candidates): # Generate a", "= np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf *", "not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration", "the trial_info structure trial = np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial,", "= Rect(exp.win, size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns =", "info): \"\"\"Function that executes what happens in each trial.\"\"\" t_info, p_info = info", "len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) #", "llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or", "all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced", "np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 
return llr # --- Exeperiment execution", "prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data", "= [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos in", "the info df isn't seeded?) # p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if", "check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True)", "pat, c in zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw() for p", "/ np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n,", "+ \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as", "= np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 return llr # --- Exeperiment", "using poisson-disc sampling.\"\"\" # TODO make more general and move into visigoth #", "# See http://bost.ocks.org/mike/algorithms/ if rng is None: rng = np.random.RandomState() uniform = rng.uniform", "1) ]) pulse = np.concatenate([ np.arange(c) + 1 for c in pulse_count ])", "trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist =", "in pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps between pulses run_duration =", "not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials):", "max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts to each trial", "outside of limits.\"\"\" return val < limits[0] or val > 
limits[1] def max_repeat(s):", "= stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial,", "duration # information or are we just going to have to deal? p_info.loc[p,", "of first flip as the offset of the last pulse if not frame:", "structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info", "at a fixed point we know will work start = 0, 0 samples", "a smidge if they were blinking before # showing the stimulus? How much", "output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for", "just going to have to deal? p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start #", "p, rng=None): \"\"\"Generated a balanced set of trials, might be only part of", "blinking and possibly wait a bit if so blink_pad_start = exp.clock.getTime() for frame", "exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data", "np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] =", "scipy.spatial import distance from psychopy.visual import TextStim, Rect from visigoth.stimuli import Point, Points,", "of times the same value repeats in sequence.\"\"\" s = pd.Series(s) switch =", "\"\"\"Function that executes what happens in each trial.\"\"\" t_info, p_info = info #", "t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color", "frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames", "seeded?) 
# p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the eye is blinking", "# Accept the candidate samples.append((x, y)) queue.append((x, y)) break if (i + 1)", "from __future__ import division import os import json from glob import glob import", "# --- Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes what happens in", "# TODO make more general and move into visigoth # TODO currently assumes", "\"\"\"False if val is outside of limits.\"\"\" return val < limits[0] or val", "is None: rng = np.random.RandomState() uniform = rng.uniform randint = rng.randint # Start", "check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using", "for p, info in p_info.iterrows(): # Allow aborts in the middle of a", "Show each frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True):", "Record the time of first flip as the offset of the last pulse", "# they come out of the blink (according to Eyelink?) # TODO can", "0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True)", "or are we just going to have to deal? 
p_info.loc[p, \"blink_pad\"] = exp.clock.getTime()", "TODO make more general and move into visigoth # TODO currently assumes square", "# Generate information for each trial for trial, trial_info in all_trials.iterrows(): pulse_info =", "t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color", "# Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color =", "[0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n", "\"targets\"]) # TODO do we want to wait a smidge if they were", "exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information", "2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target to", "uniform = rng.uniform randint = rng.randint # Start at a fixed point we", "session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in", "locals() def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" # TODO let us", "frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period # Collect the response", "sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0)", "in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\",", "if they were blinking before # showing the stimulus? 
How much vision do", "n_trials = constraints.trials_per_run # --- Assign trial components # Assign the stimulus to", "= rng.permutation(gen_dist) # Assign pulse counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max)", "conditions to accept the candidate in_array = (np.abs(x) < size / 2) &", "# Add in name information that matches across tables all_trials = all_trials.assign( subject=exp.p.subject,", "if we're not being precise # about the overall time of the run", "pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components # Map from trial to", "can we make life easier later by updating the gap duration # information", "exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset", "trial components # Assign the stimulus to a side stim_pos = np.repeat([0, 1],", "remote.\"\"\" t_info, _ = info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\"", "the stimulus? 
How much vision do people have right when # they come", "pulse_data = [p_data for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname =", "# ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\",", "zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw() for p in self.patterns: p.draw()", "= target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma ** 2", "True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink", "all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some components", "= rng.permutation(stim_pos) # Assign the target to a side gen_dist = np.repeat([0, 1],", "AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters", "check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims)", "Eyelink?) # TODO can we make life easier later by updating the gap", "execution def run_trial(exp, info): \"\"\"Function that executes what happens in each trial.\"\"\" t_info,", "run-wise performance information.\"\"\" # TODO Track fixation breaks here? 
Also in the remote?", "= trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() +", "default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix = Point(exp.win,", "information for the remote.\"\"\" t_info, _ = info return t_info.to_json() def compute_performance(self): \"\"\"Compute", "log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr", "prior_correct = prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\", \"You got {:.0%}", "we make life easier later by updating the gap duration # information or", "exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"]", "information that's not part of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials =", "x, y = s_x + r * np.cos(a), s_y + r * np.sin(a)", "/ (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of pulse based on", "now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler,", "exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"]", "for saving these # Currently it errors out (maybe because the info df", "visigoth # TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng is", "= generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1): trial_part, pulse_part = generate_block(constraints,", "\"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation breaks here? 
Also in the", "frame_range # so it should correspond to frames dropped during the stim p_info.loc[p,", "info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we get a good solution for", "and exp.p.save_data: trial_data = [t_data for t_data, _ in exp.trial_data] pulse_data = [p_data", "= exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size)", "from this sample a = uniform(0, 2 * np.pi) r = uniform(radius, 2", "elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield trial and", "minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to", "----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"],", "side stim_pos = np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos", "block generation def not_in_range(val, limits): \"\"\"False if val is outside of limits.\"\"\" return", "correct today!\".format(total_correct), ]) n = len(lines) height = .5 heights = (np.arange(n)[::-1] -", "to Eyelink?) # TODO can we make life easier later by updating the", "res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in", "<filename>fmri/experiment.py<gh_stars>0 from __future__ import division import os import json from glob import glob", "psychopy.visual import TextStim, Rect from visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth", "these # Currently it errors out (maybe because the info df isn't seeded?)", "it should correspond to frames dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames", "# TODO Track fixation breaks here? 
Also in the remote? if self.trial_data: data", "later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"]", "Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal", "train info.\"\"\" # TODO let us set random number generator somehow. Command line?", "_ in exp.trial_data] pulse_data = [p_data for _, p_data in exp.trial_data] data =", "= (np.abs(x) < size / 2) & (np.abs(y) < size / 2) in_ring", "Add in blank fields that will be filled in later empty_cols = [\"onset_fix\",", "here? Also in the remote? if self.trial_data: data = pd.DataFrame([t for t, _", "# Use the first random sample if we're not being precise # about", "what happens in each trial.\"\"\" t_info, p_info = info # ~~~ Set trial-constant", "!= s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated", "n_trials count_error = np.inf while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng,", "= np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti,", "[start] queue = [start] while queue: # Pick a sample to expand from", "PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def", "run_trial(exp, info): \"\"\"Function that executes what happens in each trial.\"\"\" t_info, p_info =", "exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return t_info,", "/ 2) & (np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)])", "count_pmf * n_trials count_error = np.inf while count_error > constraints.sum_count_error: pulse_count = 
flexible_values(p.pulse_count,", "scipy import stats from scipy.spatial import distance from psychopy.visual import TextStim, Rect from", "np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI", "xy[:, 0] += center[0] xy[:, 1] += center[1] self.box = Rect(exp.win, size +", "if rng is None: rng = np.random.RandomState() uniform = rng.uniform randint = rng.randint", "xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern)", "sample a = uniform(0, 2 * np.pi) r = uniform(radius, 2 * radius)", "in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and in_ring: # Accept", "rng is None: rng = np.random.RandomState() uniform = rng.uniform randint = rng.randint #", "if self.trial_data: data = pd.DataFrame([t for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean()", "Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1): trial_part,", "[\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"]", "Also in the remote? 
if self.trial_data: data = pd.DataFrame([t for t, _ in", "- 1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a))", "= glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials", "frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\"", "gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur *", "gaps between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials,", "= pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components", "trial to pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for i, c in", "all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will be filled in later empty_cols", "t_info, p_info = info # ~~~ Set trial-constant attributes of the stimuli exp.s.cue.pos", "1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame:", "+ 1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to each", "llr_mean = np.inf llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or", "= s_x + r * np.cos(a), s_y + r * np.sin(a) # Check", "each trial.\"\"\" t_info, p_info = info # ~~~ Set trial-constant attributes of the", "trial.\"\"\" t_info, p_info = info # ~~~ Set trial-constant attributes of the stimuli", "from psychopy.visual import TextStim, Rect from visigoth.stimuli import Point, Points, PointCue, Pattern from", "if prior_trials: prior_correct = 
prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\", \"You", "geometric distribution.\"\"\" a, b = min(support) - 1, max(support) dist = stats.geom(p=p, loc=a)", "pos=(0, y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data", "pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration,", "r * np.cos(a), s_y + r * np.sin(a) # Check the three conditions", "of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds,", "stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"]", "# p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the eye is blinking and", "gen_dist) ]) llr_mean = np.inf llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean,", "+= center[0] xy[:, 1] += center[1] self.box = Rect(exp.win, size + stim_size, size", "gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info structure", "response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res", "# about the overall time of the run (i.e. 
in psychophys rig) if", "p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\" t_info, _ =", "= 10 ** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos", "col in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan,", "pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p):", "deal? p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each frame of the", "objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue", "all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses", "fixed point we know will work start = 0, 0 samples = [start]", "[start] while queue: # Pick a sample to expand from s_idx = randint(len(queue))", "from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined", "# Assign pulse counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1", "* 2 stim_size = exp.p.stim_size / 5 xy = poisson_disc_sample(size, stim_size) xy[:, 0]", "np.cos(a), s_y + r * np.sin(a) # Check the three conditions to accept", "trial_info, pulse_info # --- Support functions for block generation def not_in_range(val, limits): \"\"\"False", "prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if", "c in pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps between pulses run_duration", "exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = 
flip_time t_info[\"onset_cue\"] = flip_time #", "= pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # ---", "None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report of their", "visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add", "Assign pulse counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf", "{:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials])", "exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix)", "= all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(),", "across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject,", "1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b =", ") return locals() def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" # TODO", "candidate samples.append((x, y)) queue.append((x, y)) break if (i + 1) == candidates: #", "if exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data, _ in exp.trial_data] pulse_data", ".groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the", "= exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time", 
"s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for i in range(candidates): # Generate", "draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 **", "Track fixation breaks here? Also in the remote? if self.trial_data: data = pd.DataFrame([t", "point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win,", "\"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code", "len(lines) height = .5 heights = (np.arange(n)[::-1] - (n / 2 - .5))", "exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data =", "exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the", "uniform(radius, 2 * radius) x, y = s_x + r * np.cos(a), s_y", "start_time=start_time, ) # Generate information for each trial for trial, trial_info in all_trials.iterrows():", "i, dtype=np.int) for n, i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd", "ignore_index=True) # Adjust the timing of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration", ".rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns so they are", "loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of", "_ = info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO", "\"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols} )", "exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) 
exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim,", "\"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make more general and move into", "s_x + r * np.cos(a), s_y + r * np.sin(a) # Check the", "gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components # Map from", "def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report of their performance.\"\"\" lines", "1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error = np.inf", "ITI to each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti,", ".sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder the columns", "subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add", "trials, might be only part of a run.\"\"\" if rng is None: rng", "when # they come out of the blink (according to Eyelink?) # TODO", "in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n", "= [t_data for t_data, _ in exp.trial_data] pulse_data = [p_data for _, p_data", "exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\"", "of a run.\"\"\" if rng is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run", "# TODO let us set random number generator somehow. Command line? 
# Build", "timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info", "three conditions to accept the candidate in_array = (np.abs(x) < size / 2)", "flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of first flip as", "functions for block generation def not_in_range(val, limits): \"\"\"False if val is outside of", "pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) #", "n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse", "psychophys rig) if not p.keep_on_time: break # Assign pulse intensities max_contrast = np.log10(1", "pulse_dist == i n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr", "the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast,", "if not p.keep_on_time: break # Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings))", "Build the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p)", "while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count,", "exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade", "5 xy = poisson_disc_sample(size, stim_size) xy[:, 0] += center[0] xy[:, 1] += center[1]", "trial_info structure trial = np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist,", "timing of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= 
exp.p.acceleration #", "Rect from visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget,", "TODO commenting out until we get a good solution for saving these #", "pandas as pd from scipy import stats from scipy.spatial import distance from psychopy.visual", "prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data) if prior_trials:", "all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for", "val is outside of limits.\"\"\" return val < limits[0] or val > limits[1]", "trial = np.concatenate([ np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1) ])", "information or are we just going to have to deal? p_info.loc[p, \"blink_pad\"] =", "expected_acc = stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info = pd.DataFrame(dict(", "= np.concatenate([ np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist) ]) llr_mean", "in zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw() for p in self.patterns:", "= exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res))", "weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n = len(lines)", "~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end,", "t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = 
exp.p.target_color #", "pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True)", "llr_mean = target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum() dv_sd = np.sqrt(constraints.sigma **", "= now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\")", "p_info = info # ~~~ Set trial-constant attributes of the stimuli exp.s.cue.pos =", "= flip_time # ~~~ Response period # Collect the response now = exp.clock.getTime()", "trial_data = [t_data for t_data, _ in exp.trial_data] pulse_data = [p_data for _,", "n = len(lines) height = .5 heights = (np.arange(n)[::-1] - (n / 2", "1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign", "# Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets", "for block generation def not_in_range(val, limits): \"\"\"False if val is outside of limits.\"\"\"", "patterns = [] for xy_i in xy: pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask,", "Average of multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size,", "= (np.arange(n)[::-1] - (n / 2 - .5)) * height for line, y", "log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n, i in", "min(support) - 1, max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) -", "height for line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip()", "\"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos, 
exp.p.fix_radius, exp.p.fix_iti_color) #", "# Record the time of first flip as the offset of the last", "to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp):", "# showing the stimulus? How much vision do people have right when #", "p, info in p_info.iterrows(): # Allow aborts in the middle of a trial", "center[1] self.box = Rect(exp.win, size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\")", "TextStim, Rect from visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth import AcquireFixation,", "dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur,", "= rng.uniform randint = rng.randint # Start at a fixed point we know", "to each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials,", "a run.\"\"\" if rng is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run #", "prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is not None:", "extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int)", "for col in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan,", "of frame_range # so it should correspond to frames dropped during the stim", "= exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname,", "= info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track", "exp.p.stim_size / 5 xy = poisson_disc_sample(size, 
stim_size) xy[:, 0] += center[0] xy[:, 1]", "later by updating the gap duration # information or are we just going", "s) for pat, c in zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw()", "flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max + 1) count_error = np.sum(np.abs(count_dist[count_support]", "stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return", "dist = \"norm\", p.dist_means[i], p.dist_sds[i] rows = pulse_dist == i n = rows.sum()", "type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos,", "in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for", "# ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else:", "def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of trials, might be only", "= True p_info.loc[p, \"pulse_onset\"] = flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |=", "count_error = np.inf while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int)", "lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n = len(lines) height =", "not part of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means,", "= all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), 
wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback,", "* np.pi) r = uniform(radius, 2 * radius) x, y = s_x +", "np import pandas as pd from scipy import stats from scipy.spatial import distance", "a side gen_dist = np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat:", "exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name information that matches across tables", "= exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return", "How much vision do people have right when # they come out of", "p): \"\"\"Probability mass given truncated geometric distribution.\"\"\" a, b = min(support) - 1,", "= np.inf llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd,", "middle of a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases()", "pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for each trial for trial, trial_info", "= Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def", "that matches across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses =", "= exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare", "limits): \"\"\"False if val is outside of limits.\"\"\" return val < limits[0] or", "is reset at beginning of frame_range # so it should correspond to frames", "a side stim_pos = np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat:", "== 
candidates: # We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object):", "[+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play()", "return np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf", "Pick a sample to expand from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx]", "for line, y in zip(lines, heights): TextStim(exp.win, line, pos=(0, y), height=height).draw() exp.win.flip() def", "pd from scipy import stats from scipy.spatial import distance from psychopy.visual import TextStim,", "10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0]", "line? # Build the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses =", "the saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist),", "exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO", "gen_dist = np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist =", "p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color =", "np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in", "stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color =", 
"sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp):", "(i.e. in psychophys rig) if not p.keep_on_time: break # Assign pulse intensities max_contrast", "draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur):", "= exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames: if not", "in psychophys rig) if not p.keep_on_time: break # --- Build the trial_info structure", "draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play()", "size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns =", "stats.norm.ppf(qs, m, s) for pat, c in zip(patterns, cs): pat.contrast = c def", "assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng is None: rng = np.random.RandomState()", "blink_pad_start # Show each frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if", "as np import pandas as pd from scipy import stats from scipy.spatial import", "= all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time", "m1 = p.dist_means s0, s1 = p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1,", "in the remote? 
if self.trial_data: data = pd.DataFrame([t for t, _ in self.trial_data])", "exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"]", "Assign the stimulus to a side stim_pos = np.repeat([0, 1], n_trials // 2)", "np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist)", "of the last pulse if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~", "of limits.\"\"\" return val < limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum", "generation def not_in_range(val, limits): \"\"\"False if val is outside of limits.\"\"\" return val", "as the offset of the last pulse if not frame: p_info.loc[p, \"pulse_offset\"] =", "expand from s_idx = randint(len(queue)) s_x, s_y = queue[s_idx] for i in range(candidates):", "size / 2) & (np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples, [(x,", "visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from", "json from glob import glob import numpy as np import pandas as pd", "exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data, _ in exp.trial_data] pulse_data =", "return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support, p): \"\"\"Probability mass given truncated geometric distribution.\"\"\"", "height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data and exp.p.save_data:", "import json from glob import glob import numpy as np import pandas as", "# --- Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast,", "exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"])", "prestim_frames = 
exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if", "0 samples = [start] queue = [start] while queue: # Pick a sample", "components # Assign the stimulus to a side stim_pos = np.repeat([0, 1], n_trials", "break # Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses)", "Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp):", "\"You're at {:.0%} correct today!\".format(total_correct), ]) n = len(lines) height = .5 heights", ") all_pulses = all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information that's", "flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() # Use", "# Show each frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not", "into visigoth # TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng", "rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr =", "t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if", "if not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period # Collect", "# Check the three conditions to accept the candidate in_array = (np.abs(x) <", "timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give", "\"\"\"Maximumum number of times the same value repeats in sequence.\"\"\" s = pd.Series(s)", "= all_pulses.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) # Add in information that's not part", "while not_in_range(run_duration, 
constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng)", "wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([", "not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for", "len(patterns) qs = np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs =", "draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius,", "np.nan for col in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan,", "+= 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of first", "p): \"\"\"Signed LLR of pulse based on contrast and generating distributions.\"\"\" m0, m1", "rng = np.random.RandomState() uniform = rng.uniform randint = rng.randint # Start at a", "return mean_acc, responses else: return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the", "pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) # --- Assign trial components #", "data = pd.DataFrame([t for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses =", "all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None):", "pulse counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf =", "+ trial_pulses.pulse_dur.sum() trial_duration = all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + 
trial_duration.shift(1).fillna(0).cumsum()) all_trials", "p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period # Collect the response now", "fields that will be filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\",", "blink (according to Eyelink?) # TODO can we make life easier later by", "Command line? # Build the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses", "intuitively organized? return trial_info, pulse_info # --- Support functions for block generation def", "s1 = p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 =", "their performance.\"\"\" lines = [\"End of the run!\"] prior_trials = prior_correct = 0", "saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets,", "pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\"", "# --- Assign trial components # Map from trial to pulse trial =", "= exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"] =", "all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan for col in empty_cols} ) all_pulses", "limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum number of times the same", "trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info =", "while queue: # Pick a sample to expand from s_idx = randint(len(queue)) s_x,", "not p.keep_on_time: break # --- Build the trial_info structure trial = np.arange(1, n_trials", "(np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius) if", "Build the trial_info structure trial = 
np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict(", "frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we", "draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" #", "performance information.\"\"\" # TODO Track fixation breaks here? Also in the remote? if", "Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs =", "limits.\"\"\" return val < limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum number", "p.dist_means s0, s1 = p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1) l0,", "psychophys rig) if not p.keep_on_time: break # --- Build the trial_info structure trial", "all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for pos", "n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s)", "center[0] xy[:, 1] += center[1] self.box = Rect(exp.win, size + stim_size, size +", "(n / 2 - .5)) * height for line, y in zip(lines, heights):", "# Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color", "t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] =", "contrast and generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1 = p.dist_sds d0,", "m0, m1 = p.dist_means s0, s1 = p.dist_sds d0, d1 = stats.norm(m0, s0),", "return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\" t_info,", "pulse_count) expected_acc = 
stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info =", "= data[\"responded\"].sum() return mean_acc, responses else: return None, None def show_performance(exp, run_correct, run_trials):", "# def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True)", "constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts to each trial count_support =", "the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"]", "rng.randint # Start at a fixed point we know will work start =", "10 ** stats.norm.ppf(qs, m, s) for pat, c in zip(patterns, cs): pat.contrast =", "import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra", "size / 2) in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and", "time of the run (i.e. 
in psychophys rig) if not p.keep_on_time: break #", "= now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res =", "all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum() trial_duration", "pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean()", "target to a side gen_dist = np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist)", "(exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0])", "that's not part of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign(", "if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data) if", "wait_iti=wait_iti, )) # --- Assign trial components # Map from trial to pulse", "= 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 **", "s = exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s) for pat,", "to a side gen_dist = np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) >", "zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd = np.inf expected_acc = np.inf while", "Rect(exp.win, size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns", "= compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean()", "~~~ Response period # Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now", "we just going to have to deal? 
p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start", "None: rng = np.random.RandomState() uniform = rng.uniform randint = rng.randint # Start at", "np.inf llr_sd = np.inf expected_acc = np.inf while (not_in_range(llr_mean, constraints.mean_range) or not_in_range(llr_sd, constraints.sd_range)", "prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] =", "exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\"", "json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code # ----------------------------------------------------------------------- #", "= \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") #", "first random sample if we're not being precise # about the overall time", "is outside of limits.\"\"\" return val < limits[0] or val > limits[1] def", "period for p, info in p_info.iterrows(): # Allow aborts in the middle of", "Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color)", "side gen_dist = np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist", "move into visigoth # TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if", "# Map from trial to pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for", "going to have to deal? 
p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show", "exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti)", "Assign gaps between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap,", "None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct", "info return t_info.to_json() def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation", "exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want to wait a smidge if", "pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns = patterns = [] for xy_i in xy: pattern", "to pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count,", "for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do", "np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim = flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses,", "exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None):", "= exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if", "reset at beginning of frame_range # so it should correspond to frames dropped", "stim_pos = rng.permutation(stim_pos) # Assign the target to a side gen_dist = np.repeat([0,", "dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def 
compute_llr(c, p): \"\"\"Signed LLR of pulse based", "\"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means)", "= all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will be filled in later", "run_correct, run_trials): \"\"\"Show the subject a report of their performance.\"\"\" lines = [\"End", "** stats.norm.ppf(qs, m, s) for pat, c in zip(patterns, cs): pat.contrast = c", "(np.abs(x) < size / 2) & (np.abs(y) < size / 2) in_ring =", "exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7,", "in empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, )", "for n, i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd = np.inf", "StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size", "index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid,", "iti_duration=t_info.wait_iti) # ~~~ Trial onset t_info[\"onset_fix\"] = exp.clock.getTime() exp.s.fix.color = exp.p.fix_ready_color if exp.p.enforce_fix:", "pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals()", "= np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv", "for pat, c in zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw() for", "wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum()", "= uniform(radius, 2 * radius) x, y = s_x + r * np.cos(a),", "2 * radius) x, y = s_x + r * np.cos(a), s_y +", "import pandas as pd from scipy import stats from 
scipy.spatial import distance from", "out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True,", "= generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True)", "happens in each trial.\"\"\" t_info, p_info = info # ~~~ Set trial-constant attributes", "with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # #", "data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname", "1) count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to each trial", "exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) #", "(pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO reorder", "log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist,", "target_llr = np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std()", "a = uniform(0, 2 * np.pi) r = uniform(radius, 2 * radius) x,", "s_y = queue[s_idx] for i in range(candidates): # Generate a candidate from this", "for t, _ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc,", "max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target to a side", "pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = 
all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() +", "part of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials = all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist),", "center, dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5", "of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add", "distributions.\"\"\" m0, m1 = p.dist_means s0, s1 = p.dist_sds d0, d1 = stats.norm(m0,", "that executes what happens in each trial.\"\"\" t_info, p_info = info # ~~~", "\"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False, **{col: np.nan", "if in_array and in_ring: # Accept the candidate samples.append((x, y)) queue.append((x, y)) break", "+ r * np.sin(a) # Check the three conditions to accept the candidate", "fixation breaks here? Also in the remote? if self.trial_data: data = pd.DataFrame([t for", "Check if the eye is blinking and possibly wait a bit if so", "exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\"", "in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"]", "and move into visigoth # TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/", "wait a smidge if they were blinking before # showing the stimulus? 
How", "max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i,", "serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\" t_info, _ = info return", "not_in_range(val, limits): \"\"\"False if val is outside of limits.\"\"\" return val < limits[0]", "lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct =", "Point, Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values from visigoth.ext.bunch import", "+= 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not", "constraints.sd_range) or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist = \"norm\", p.dist_means[i],", "# so it should correspond to frames dropped during the stim p_info.loc[p, \"dropped_frames\"]", "info # ~~~ Set trial-constant attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos", "of their performance.\"\"\" lines = [\"End of the run!\"] prior_trials = prior_correct =", "exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True) else: exp.wait_until(exp.iti_end, draw=\"fix\", check_abort=True, iti_duration=t_info.wait_iti) # ~~~", "np.sin(a) # Check the three conditions to accept the candidate in_array = (np.abs(x)", "counts to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support,", "p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame:", "while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign pulse counts to each", "exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\", \"targets\"]) # TODO do we want to", 
"(exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"]", "exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True)", "= info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we get a good solution", "during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not", "all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of", "/= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in name information that matches across", "\"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~", "np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([", "stats.norm(dv, dv_sd).sf(0).mean() # --- Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse,", "eye is blinking and possibly wait a bit if so blink_pad_start = exp.clock.getTime()", "in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"]", "to each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1])", "candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make more general and", "n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, 
sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield", "# Add in information that's not part of the saved design gen_dist =", "10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0],", "t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\", t_info.result, t_info.response) exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"])", "fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related code # -----------------------------------------------------------------------", "might be only part of a run.\"\"\" if rng is None: rng =", "i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd = np.inf expected_acc =", "= flip_time blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter", "# Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until", "= exp.p.fix_iti_color exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for", "visigoth.ext.bunch import Bunch def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined at", "exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) #", "dist, size=8): stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5 xy", "at beginning of frame_range # so it should correspond to frames dropped during", "def define_cmdline_params(self, parser): \"\"\"Add extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1,", "t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", 
\"cue\", \"targets\"]) # Record", "division import os import json from glob import glob import numpy as np", "trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated a balanced set of", "that will be filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\",", "\"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\",", "= all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some", "xy[:, 1] += center[1] self.box = Rect(exp.win, size + stim_size, size + stim_size,", "pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for n, i in zip(pulse_count, gen_dist) ])", "t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp,", "TODO can we make life easier later by updating the gap duration #", "- blink_pad_start # Show each frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur):", "more intuitively organized? 
return trial_info, pulse_info # --- Support functions for block generation", "Demo-related code # ----------------------------------------------------------------------- # def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color", "array # See http://bost.ocks.org/mike/algorithms/ if rng is None: rng = np.random.RandomState() uniform =", "np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n, i, dtype=np.int) for", "i in range(candidates): # Generate a candidate from this sample a = uniform(0,", "for _, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\"", "frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"] = flip_time # ~~~ Stimulus period for p,", "now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp, t_info.target,", "while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0", "< limits[0] or val > limits[1] def max_repeat(s): \"\"\"Maximumum number of times the", "wait a bit if so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if", "\"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid:", "\"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\",", "stats from scipy.spatial import distance from psychopy.visual import TextStim, Rect from visigoth.stimuli import", "= np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos)", "1): trial_part, pulse_part = 
generate_block(constraints, exp.p) trial_part[\"trial\"] += len(all_trials) pulse_part[\"trial\"] += len(all_trials) all_trials", "t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\")", "n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos, pulse_count=pulse_count.astype(int), wait_iti=wait_iti, )) #", "period # Collect the response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] =", "because the info df isn't seeded?) # p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check", "(all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) #", "** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] -", "exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\", check_abort=True)", "data files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data for t_data,", "the same value repeats in sequence.\"\"\" s = pd.Series(s) switch = s !=", "= exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"], draw=\"fix\",", "will work start = 0, 0 samples = [start] queue = [start] while", "+ 1) == candidates: # We've exhausted the particular sample queue.pop(s_idx) return np.asarray(samples)", "show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report of their performance.\"\"\" lines 
=", "= pd.Series(s) switch = s != s.shift(1) return switch.groupby(switch.cumsum()).cumcount().max() + 1 def trunc_geom_pmf(support,", "frames dropped during the stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur):", "structure trial = np.arange(1, n_trials + 1) trial_info = pd.DataFrame(dict( trial=trial, gen_dist=gen_dist, stim_pos=stim_pos,", "precise # about the overall time of the run (i.e. in psychophys rig)", "exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) #", "y)) break if (i + 1) == candidates: # We've exhausted the particular", "0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f)", "s_y + r * np.sin(a) # Check the three conditions to accept the", "= all_trials.assign( gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses =", "np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf = exp.p.stim_sf *", "blank fields that will be filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\",", "skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\"", "res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\" else:", "lines = [\"End of the run!\"] prior_trials = prior_correct = 0 output_dir =", "exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~ Inter-trial interval exp.s.fix.color = exp.p.fix_iti_color if exp.p.keep_on_time: exp.wait_until(t_info[\"start_time\"],", "if val is outside 
of limits.\"\"\" return val < limits[0] or val >", "data.to_csv(out_data_fname, index=False) out_json_fname = exp.output_stem + \"_params.json\" with open(out_json_fname, \"w\") as fid: json.dump(exp.p,", "s_x, s_y = queue[s_idx] for i in range(candidates): # Generate a candidate from", "--- Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10", "exp.wait_until(timeout=exp.p.wait_feedback, draw=[\"targets\"]) exp.s.targets.color = exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color =", "= flip_time # ~~~ Stimulus period for p, info in p_info.iterrows(): # Allow", "of the run (i.e. in psychophys rig) if not p.keep_on_time: break # Assign", "t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not", "trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error = np.inf while count_error >", "before # showing the stimulus? How much vision do people have right when", "of the run (i.e. 
in psychophys rig) if not p.keep_on_time: break # ---", "in psychophys rig) if not p.keep_on_time: break # Assign pulse intensities max_contrast =", "np.arange(c) + 1 for c in pulse_count ]) n_pulses = pulse_count.sum() # Assign", "a good solution for saving these # Currently it errors out (maybe because", "= all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information", "\"\"\"Yield trial and pulse train info.\"\"\" # TODO let us set random number", "else: return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show the subject a report", "= np.repeat([0, 1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist)", "lineColor=\"white\") self.patterns = patterns = [] for xy_i in xy: pattern = Pattern(exp.win,", "prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is not", "n = len(patterns) qs = np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist], exp.p.dist_sds[dist]", "exp.p.save_data: trial_data = [t_data for t_data, _ in exp.trial_data] pulse_data = [p_data for", "to have to deal? p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each", "easier later by updating the gap duration # information or are we just", "us set random number generator somehow. Command line? 
# Build the full experimental", "not frame: p_info.loc[p, \"pulse_offset\"] = flip_time # ~~~ Response period # Collect the", "compute_llr(c, p): \"\"\"Signed LLR of pulse based on contrast and generating distributions.\"\"\" m0,", "poisson_disc_sample(size, stim_size) xy[:, 0] += center[0] xy[:, 1] += center[1] self.box = Rect(exp.win,", "count_error = np.sum(np.abs(count_dist[count_support] - expected_count_dist)) # Assign initial ITI to each trial total_iti", "exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1):", "a balanced set of trials, might be only part of a run.\"\"\" if", "= poisson_disc_sample(size, stim_size) xy[:, 0] += center[0] xy[:, 1] += center[1] self.box =", "# --- Assign trial components # Assign the stimulus to a side stim_pos", "= exp.p.fix_ready_color if exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None:", "Assign trial components # Assign the stimulus to a side stim_pos = np.repeat([0,", "of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play()", "from glob import glob import numpy as np import pandas as pd from", "exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) #", "--- Assign trial components # Map from trial to pulse trial = np.concatenate([", "p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the", "stim p_info.loc[p, \"dropped_frames\"] = exp.win.nDroppedFrames for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if", "check_abort=True) exp.s[\"box_l\"] = StimBox(exp, [-7, 0], 0) exp.s[\"box_h\"] = StimBox(exp, [+7, 0], 1)", "= 
flexible_values(p.pulse_gap, n_trials, rng) gap_dur = flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(),", "\"*_trials.csv\")) if prior_fnames: prior_data = pd.concat([pd.read_csv(f) for f in prior_fnames]) prior_trials = len(prior_data)", "/ 2 - .5)) * height for line, y in zip(lines, heights): TextStim(exp.win,", "draw=\"targets\") if res is None: t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback", "they are more intuitively organized? return trial_info, pulse_info # --- Support functions for", "1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\",", "Currently it errors out (maybe because the info df isn't seeded?) # p_info.loc[p,", "exp.p.enforce_fix: res = exp.wait_until(AcquireFixation(exp), timeout=exp.p.wait_fix, draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\"", "1], n_trials // 2) while max_repeat(gen_dist) > constraints.max_dist_repeat: gen_dist = rng.permutation(gen_dist) # Assign", "blink = not exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter is reset", "t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of", "np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos = exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\",", "info in p_info.iterrows(): # Allow aborts in the middle of a trial exp.check_abort()", "def max_repeat(s): \"\"\"Maximumum number of times the same value repeats in sequence.\"\"\" s", "exp.sounds.nofix.play() return t_info, p_info for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus", "10 ** np.mean(exp.p.dist_means) for pos in [0, 1]: exp.s.cue.pos = 
exp.p.stim_pos[pos] exp.s.pattern.pos =", "exp.draw(\"fix\") return t_info, p_info def serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\"", "trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def", "Exeperiment execution def run_trial(exp, info): \"\"\"Function that executes what happens in each trial.\"\"\"", "for frame in exp.frame_range(seconds=exp.p.wait_start): exp.check_fixation(allow_blinks=True) exp.draw(\"fix\") # ~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color", "the middle of a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast", "data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data)", "sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist, size=8): stim_sf", "= 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames: prior_data =", "exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\",", "exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def", "// 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target", "from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum()", "--- Support functions for block generation def not_in_range(val, limits): 
\"\"\"False if val is", "in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\",", "stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 -", "# TODO reorder the columns so they are more intuitively organized? return trial_info,", "_, p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname,", "or not_in_range(expected_acc, constraints.acc_range)): for i in [0, 1]: dist = \"norm\", p.dist_means[i], p.dist_sds[i]", "Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we", "in blank fields that will be filled in later empty_cols = [\"onset_fix\", \"offset_fix\",", "\"pulse_offset\"] = flip_time # ~~~ Response period # Collect the response now =", "if rng is None: rng = np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign", "= AcquireTarget(exp, t_info.target, allow_retry=not exp.p.enforce_fix) res = exp.wait_until(response_handler, timeout=exp.p.wait_resp, draw=\"targets\") if res is", "radius) if in_array and in_ring: # Accept the candidate samples.append((x, y)) queue.append((x, y))", "# TODO can we make life easier later by updating the gap duration", "times the same value repeats in sequence.\"\"\" s = pd.Series(s) switch = s", "parser): \"\"\"Add extra parameters to be defined at runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\",", "exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating stimulus pattern = Pattern(exp.win,", "draw=\"fix\") if res is None: t_info[\"result\"] = \"nofix\" exp.sounds.nofix.play() return t_info, p_info for", "np.log10(d1.pdf(c)) llr = l1 - l0 return llr # --- Exeperiment 
execution def", "t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1", "for t_data, _ in exp.trial_data] pulse_data = [p_data for _, p_data in exp.trial_data]", "pulse_count.sum() # Assign gaps between pulses run_duration = np.inf while not_in_range(run_duration, constraints.run_range): wait_pre_stim", "exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\", \"cue\"]) for frame in", "]) pulse = np.concatenate([ np.arange(c) + 1 for c in pulse_count ]) n_pulses", "if not p.keep_on_time: break # --- Build the trial_info structure trial = np.arange(1,", "and possibly wait a bit if so blink_pad_start = exp.clock.getTime() for frame in", "Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color) # Spatial cue cue =", "were blinking before # showing the stimulus? How much vision do people have", "pulse_info = all_pulses.loc[all_pulses[\"trial\"] == trial].copy() yield trial_info, pulse_info def generate_block(constraints, p, rng=None): \"\"\"Generated", "first flip as the offset of the last pulse if not frame: p_info.loc[p,", "in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data", "correct!\".format(run_correct), ]) if (prior_trials + run_trials): total_correct = np.average([prior_correct, run_correct], weights=[prior_trials, run_trials]) lines.extend([", "import TextStim, Rect from visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth import", "= exp.p.dist_means[dist], exp.p.dist_sds[dist] cs = 10 ** stats.norm.ppf(qs, m, s) for pat, c", "= flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() #", "]) # Use the first random sample if we're not being precise #", "for i in [0, 1]: dist = \"norm\", p.dist_means[i], 
p.dist_sds[i] rows = pulse_dist", "it errors out (maybe because the info df isn't seeded?) # p_info.loc[p, \"phases\"]", "be filled in later empty_cols = [\"onset_fix\", \"offset_fix\", \"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\",", "/= exp.p.acceleration # Add in name information that matches across tables all_trials =", "pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\"", "df isn't seeded?) # p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the eye", "information that matches across tables all_trials = all_trials.assign( subject=exp.p.subject, session=exp.p.session, run=exp.p.run ) all_pulses", "2) in_ring = np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and in_ring: #", "some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"] /= exp.p.acceleration # Add in", "+= len(all_trials) all_trials = all_trials.append(trial_part, ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the", "work start = 0, 0 samples = [start] queue = [start] while queue:", "a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO", "constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if p.skip_first_iti: wait_iti[0] = 0 total_iti =", "so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break exp.draw([\"fix\", \"cue\",", "subject a report of their performance.\"\"\" lines = [\"End of the run!\"] prior_trials", "in_ring: # Accept the candidate samples.append((x, y)) queue.append((x, y)) break if (i +", "being precise # about the overall time of the run (i.e. 
in psychophys", "ignore_index=True) all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some components for", "for i, c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) + 1", "def compute_performance(self): \"\"\"Compute run-wise performance information.\"\"\" # TODO Track fixation breaks here? Also", "out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem", "r * np.sin(a) # Check the three conditions to accept the candidate in_array", "Spatial cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets =", "= (pulse_info .groupby(\"trial\") .sum() .loc[:, \"pulse_llr\"] .rename(\"trial_llr\")) trial_info = trial_info.join(trial_llr, on=\"trial\") # TODO", "run (i.e. in psychophys rig) if not p.keep_on_time: break # --- Build the", "of the run!\"] prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames =", "for frame, skipped in prestim_frames: if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"]", "particular sample queue.pop(s_idx) return np.asarray(samples) class StimBox(object): def __init__(self, exp, center, dist, size=8):", "= rng.randint # Start at a fixed point we know will work start", "# Build the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints,", "|= blink # This counter is reset at beginning of frame_range # so", "= stats.norm(m0, s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1", "exp.frame_range(seconds=exp.p.pulse_dur): exp.draw(all_stims) exp.wait_until(\"space\", draw=[\"fix\", \"targets\", \"cue\"], check_abort=True) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10", "we get a good solution 
for saving these # Currently it errors out", "# Adjust the timing of some components for training all_trials[\"wait_pre_stim\"] /= exp.p.acceleration all_pulses[\"gap_dur\"]", "pulse_llr=pulse_llr, )) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr =", "generating distributions.\"\"\" m0, m1 = p.dist_means s0, s1 = p.dist_sds d0, d1 =", "stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 return", "max(support) dist = stats.geom(p=p, loc=a) return dist.pmf(support) / (dist.cdf(b) - dist.cdf(a)) def compute_llr(c,", "p.keep_on_time: break # Assign pulse intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast =", "are we just going to have to deal? p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() -", "np.random.RandomState() n_trials = constraints.trials_per_run # --- Assign trial components # Assign the stimulus", "_ in self.trial_data]) mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else:", "beginning of frame_range # so it should correspond to frames dropped during the", "queue = [start] while queue: # Pick a sample to expand from s_idx", "trial-constant attributes of the stimuli exp.s.cue.pos = exp.p.stim_pos[t_info.stim_pos] exp.s.pattern.pos = exp.p.stim_pos[t_info.stim_pos] # ~~~", "if p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() # Use the first random", "all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some components for training all_trials[\"wait_pre_stim\"] /=", "constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the target to a side gen_dist =", "uniform(0, 2 * np.pi) r = uniform(radius, 2 * radius) x, y =", "run_correct], weights=[prior_trials, run_trials]) lines.extend([ \"\", \"You're at {:.0%} correct today!\".format(total_correct), ]) n =", "all_pulses = all_pulses.append(pulse_part, ignore_index=True) # Adjust the timing of some 
components for training", "np.all(distance.cdist(samples, [(x, y)]) > radius) if in_array and in_ring: # Accept the candidate", "Build the pulse_info structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 **", "== i n = rows.sum() log_contrast[rows] = flexible_values(dist, n, rng, max=max_contrast) pulse_llr =", "data[\"responded\"].sum() return mean_acc, responses else: return None, None def show_performance(exp, run_correct, run_trials): \"\"\"Show", "information for each trial for trial, trial_info in all_trials.iterrows(): pulse_info = all_pulses.loc[all_pulses[\"trial\"] ==", "the gap duration # information or are we just going to have to", "0) ) return locals() def generate_trials(exp): \"\"\"Yield trial and pulse train info.\"\"\" #", "parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix =", "in information that's not part of the saved design gen_dist = all_trials[\"gen_dist\"] all_trials", "s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0 return llr", "1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"] = flip_time t_info[\"onset_cue\"]", "y)]) > radius) if in_array and in_ring: # Accept the candidate samples.append((x, y))", "gen_mean=np.take(exp.p.dist_means, gen_dist), gen_sd=np.take(exp.p.dist_sds, gen_dist), target=np.take(exp.p.dist_targets, gen_dist), wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) #", "aborts in the middle of a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast", "check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast =", "pulse = np.concatenate([ np.arange(c) + 1 for c in pulse_count ]) n_pulses =", "Assign 
trial components # Map from trial to pulse trial = np.concatenate([ np.full(c,", "from scipy.spatial import distance from psychopy.visual import TextStim, Rect from visigoth.stimuli import Point,", "sample if we're not being precise # about the overall time of the", "else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) if not frame: t_info[\"onset_targets\"]", "+ exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\",", "np.where(pulse_dist, pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv =", "(dist.cdf(b) - dist.cdf(a)) def compute_llr(c, p): \"\"\"Signed LLR of pulse based on contrast", "to deal? p_info.loc[p, \"blink_pad\"] = exp.clock.getTime() - blink_pad_start # Show each frame of", "run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ]) # Use the", "exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of first flip as the offset", "import division import os import json from glob import glob import numpy as", "to a side stim_pos = np.repeat([0, 1], n_trials // 2) while max_repeat(stim_pos) >", "stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\")", "all_trials[\"wait_pre_stim\"] + pulse_train_dur start_time = (all_trials[\"wait_iti\"].cumsum() + trial_duration.shift(1).fillna(0).cumsum()) all_trials = all_trials.assign( trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(),", "for f in prior_fnames]) prior_trials = len(prior_data) if prior_trials: prior_correct = prior_data[\"correct\"].mean() if", "p.skip_first_iti: wait_iti[0] = 0 total_iti = wait_iti.sum() # Use the first random sample", "= prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames = 
glob(os.path.join(output_dir, \"*_trials.csv\")) if prior_fnames:", "save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data", "dtype=np.int) for n, i in zip(pulse_count, gen_dist) ]) llr_mean = np.inf llr_sd =", "they come out of the blink (according to Eyelink?) # TODO can we", "sfs=exp.p.stim_sf, pos=(0, 0) ) return locals() def generate_trials(exp): \"\"\"Yield trial and pulse train", "1 for c in pulse_count ]) n_pulses = pulse_count.sum() # Assign gaps between", "y), height=height).draw() exp.win.flip() def save_data(exp): \"\"\"Output data files to disk.\"\"\" if exp.trial_data and", "= StimBox(exp, [+7, 0], 1) exp.wait_until(\"space\", draw=[\"fix\", \"box_h\", \"box_l\"], check_abort=True) exp.sounds[\"correct\"].play() exp.wait_until(\"space\", draw=\"fix\",", "create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point fix = Point(exp.win, exp.p.fix_pos, exp.p.fix_radius, exp.p.fix_iti_color)", "= Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple sinusoidal grating stimulus pattern", "\"onset_cue\", \"offset_cue\", \"onset_targets\", \"onset_feedback\", \"result\", \"response\", \"correct\", \"rt\"] all_trials = all_trials.assign( fixbreaks=0, responded=False,", "random sample if we're not being precise # about the overall time of", "each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng)", "= p.dist_means s0, s1 = p.dist_sds d0, d1 = stats.norm(m0, s0), stats.norm(m1, s1)", "exp.p.stim_pos[pos] exp.s.pattern.pos = exp.p.stim_pos[pos] exp.wait_until(\"space\", draw=all_stims, check_abort=True) for frame in exp.frame_range(seconds=1): exp.draw([\"fix\", \"targets\",", "index=False) data = pd.concat(pulse_data) out_data_fname = exp.output_stem + \"_pulses.csv\" data.to_csv(out_data_fname, index=False) out_json_fname =", "> constraints.sum_count_error: 
pulse_count = flexible_values(p.pulse_count, n_trials, rng, max=p.pulse_count_max).astype(int) count_dist = np.bincount(pulse_count, minlength=p.pulse_count_max +", "self.box = Rect(exp.win, size + stim_size, size + stim_size, pos=center, fillColor=exp.win.color, lineColor=\"white\") self.patterns", "< size / 2) & (np.abs(y) < size / 2) in_ring = np.all(distance.cdist(samples,", "mean_acc = data[\"correct\"].mean() responses = data[\"responded\"].sum() return mean_acc, responses else: return None, None", "the eye is blinking and possibly wait a bit if so blink_pad_start =", "p.pulse_dur * n_pulses, ]) # Use the first random sample if we're not", "= 10 ** stats.norm.ppf(qs, m, s) for pat, c in zip(patterns, cs): pat.contrast", "the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out until we get", "pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False) data = pd.concat(pulse_data) out_data_fname =", "initial ITI to each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti =", "empty_cols} ) all_pulses = all_pulses.assign( occurred=False, blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) #", "n, rng, max=max_contrast) pulse_llr = compute_llr(log_contrast, p) target_llr = np.where(pulse_dist, pulse_llr, -1 *", "2 * np.pi) r = uniform(radius, 2 * radius) x, y = s_x", "patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95, n) m, s = exp.p.dist_means[dist],", "rig) if not p.keep_on_time: break # --- Build the trial_info structure trial =", "import os import json from glob import glob import numpy as np import", "s0), stats.norm(m1, s1) l0, l1 = np.log10(d0.pdf(c)), np.log10(d1.pdf(c)) llr = l1 - l0", "updating the gap duration # information or are we just going to have", "trial_llr=trial_pulses.pulse_llr.sum(), log_contrast_mean=trial_pulses.log_contrast.mean(), 
pulse_train_dur=pulse_train_dur, trial_duration=trial_duration, start_time=start_time, ) # Generate information for each trial for", "all_pulses = generate_block(constraints, exp.p) for i in range(exp.p.blocks - 1): trial_part, pulse_part =", "in the middle of a trial exp.check_abort() # Update the pattern exp.s.pattern.contrast =", "p_data in exp.trial_data] data = pd.DataFrame(trial_data) out_data_fname = exp.output_stem + \"_trials.csv\" data.to_csv(out_data_fname, index=False)", "flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True p_info.loc[p, \"pulse_onset\"]", "table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur = trial_pulses.gap_dur.sum() + trial_pulses.pulse_dur.sum()", "from trial to pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for i, c", "= exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time of first flip as the", "set random number generator somehow. Command line? 
# Build the full experimental design", "pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info", "size=8): stim_sf = exp.p.stim_sf * 2 stim_size = exp.p.stim_size / 5 xy =", "response now = exp.clock.getTime() t_info[\"offset_fix\"] = now t_info[\"offset_cue\"] = now response_handler = AcquireTarget(exp,", "blink=False, blink_pad=np.nan, dropped_frames=np.nan, pulse_onset=np.nan, pulse_offset=np.nan, ) # Add trial-level information computed from pulse-level", "set of trials, might be only part of a run.\"\"\" if rng is", "exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast =", "exp.tracker.check_eye_open(new_sample=False) p_info.loc[p, \"blink\"] |= blink # This counter is reset at beginning of", "~~~ Pre-stimulus period exp.s.fix.color = exp.p.fix_trial_color prestim_frames = exp.frame_range(seconds=t_info.wait_pre_stim, yield_skipped=True) for frame, skipped", "bit if so blink_pad_start = exp.clock.getTime() for frame in exp.frame_range(seconds=exp.p.blink_pad_timeout): if exp.check_fixation(): break", "= \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims", "blink # This counter is reset at beginning of frame_range # so it", "c in zip(patterns, cs): pat.contrast = c def draw(self): self.box.draw() for p in", "# Assign initial ITI to each trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range):", "structure pulse_info = pd.DataFrame(dict( trial=trial, pulse=pulse, gap_dur=gap_dur, log_contrast=log_contrast, contrast=10 ** log_contrast, pulse_llr=pulse_llr, ))", "information.\"\"\" # TODO Track fixation breaks here? Also in the remote? 
if self.trial_data:", "\"cue\", \"targets\"]) # Record the time of first flip as the offset of", "count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist = count_pmf * n_trials count_error = np.inf while", "much vision do people have right when # they come out of the", "the full experimental design constraints = Bunch(exp.p.design_constraints) all_trials, all_pulses = generate_block(constraints, exp.p) for", "* n_trials count_error = np.inf while count_error > constraints.sum_count_error: pulse_count = flexible_values(p.pulse_count, n_trials,", "TODO currently assumes square array # See http://bost.ocks.org/mike/algorithms/ if rng is None: rng", "\"\"\"Output data files to disk.\"\"\" if exp.trial_data and exp.p.save_data: trial_data = [t_data for", "= exp.clock.getTime() return t_info, p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\",", "rig) if not p.keep_on_time: break # Assign pulse intensities max_contrast = np.log10(1 /", "cue cue = PointCue(exp.win, exp.p.cue_norm, exp.p.cue_radius, exp.p.cue_color) # Saccade targets targets = Points(exp.win,", "exp.check_abort() # Update the pattern exp.s.pattern.contrast = info.contrast exp.s.pattern.randomize_phases() # TODO commenting out", "n_trials // 2) while max_repeat(stim_pos) > constraints.max_stim_repeat: stim_pos = rng.permutation(stim_pos) # Assign the", "def poisson_disc_sample(size, radius, candidates=100, rng=None): \"\"\"Find positions using poisson-disc sampling.\"\"\" # TODO make", "t_data, _ in exp.trial_data] pulse_data = [p_data for _, p_data in exp.trial_data] data", "def serialize_trial_info(exp, info): \"\"\"Package trial information for the remote.\"\"\" t_info, _ = info", "the target to a side gen_dist = np.repeat([0, 1], n_trials // 2) while", "[\"End of the run!\"] prior_trials = prior_correct = 0 output_dir = os.path.dirname(exp.output_stem) prior_fnames", "is not None: lines.extend([ \"\", \"You got {:.0%} correct!\".format(run_correct), ]) if 
(prior_trials +", "we're not being precise # about the overall time of the run (i.e.", "dtype=np.int) for i, c in enumerate(pulse_count, 1) ]) pulse = np.concatenate([ np.arange(c) +", "p.dist_sds[i] rows = pulse_dist == i n = rows.sum() log_contrast[rows] = flexible_values(dist, n,", "max_repeat(s): \"\"\"Maximumum number of times the same value repeats in sequence.\"\"\" s =", "flexible_values(p.pulse_gap, n_pulses, rng) run_duration = np.sum([ wait_iti.sum(), wait_pre_stim.sum(), gap_dur.sum(), p.pulse_dur * n_pulses, ])", "wait_resp=exp.p.wait_resp, wait_feedback=exp.p.wait_feedback, ) all_pulses = all_pulses.assign(pulse_dur=exp.p.pulse_dur) # Add in blank fields that will", "* radius) x, y = s_x + r * np.cos(a), s_y + r", "\"targets\", \"pattern\"] flip_time = exp.draw(stims) if not frame: exp.tracker.send_message(\"pulse_onset\") p_info.loc[p, \"occurred\"] = True", "a report of their performance.\"\"\" lines = [\"End of the run!\"] prior_trials =", "from visigoth.stimuli import Point, Points, PointCue, Pattern from visigoth import AcquireFixation, AcquireTarget, flexible_values", "draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[0] - exp.p.dist_sds[0]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s[\"box_l\"]", "smidge if they were blinking before # showing the stimulus? 
How much vision", "draw=all_stims, check_abort=True) exp.s.pattern.contrast = 10 ** (exp.p.dist_means[1] + exp.p.dist_sds[1]) exp.wait_until(\"space\", draw=all_stims, check_abort=True) exp.s.pattern.contrast", "exp.p.cue_color) # Saccade targets targets = Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of", "runtime.\"\"\" parser.add_argument(\"--acceleration\", default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" #", "information computed from pulse-level table all_trials = all_trials.set_index(\"trial\", drop=False) trial_pulses = all_pulses.groupby(\"trial\") pulse_train_dur", "pulse_llr, -1 * pulse_llr) llr_mean = target_llr.mean() llr_sd = target_llr.std() dv = pd.Series(target_llr).groupby(pd.Series(trial)).sum()", "rng=None): \"\"\"Generated a balanced set of trials, might be only part of a", "info df isn't seeded?) # p_info.loc[p, \"phases\"] = exp.s.pattern.array.phases # Check if the", "prior_trials: prior_correct = prior_data[\"correct\"].mean() if run_correct is not None: lines.extend([ \"\", \"You got", "trial total_iti = np.inf while not_in_range(total_iti, constraints.iti_range): wait_iti = flexible_values(p.wait_iti, n_trials, rng) if", "each trial count_support = np.arange(p.pulse_count[-1], p.pulse_count_max) + 1 count_pmf = trunc_geom_pmf(count_support, p.pulse_count[1]) expected_count_dist", "return t_info, p_info else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) #", "multiple sinusoidal grating stimulus pattern = Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=exp.p.stim_size, sfs=exp.p.stim_sf, pos=(0,", "t_info[\"result\"] = \"nochoice\" else: t_info.update(pd.Series(res)) # Give feedback t_info[\"onset_feedback\"] = exp.clock.getTime() exp.sounds[t_info.result].play() exp.show_feedback(\"targets\",", "exp.win.nDroppedFrames 
for frame in exp.frame_range(seconds=info.gap_dur): if not exp.check_fixation(allow_blinks=True): if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"]", "= exp.p.target_color # Prepare for the inter-trial interval exp.s.fix.color = exp.p.fix_iti_color exp.draw(\"fix\") return", "pulse trial = np.concatenate([ np.full(c, i, dtype=np.int) for i, c in enumerate(pulse_count, 1)", "contrast=10 ** log_contrast, pulse_llr=pulse_llr, )) # --- Update the trial_info structure trial_info[\"wait_pre_stim\"] =", "check_abort=True) exp.sounds[\"wrong\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.sounds[\"fixbreak\"].play() exp.wait_until(\"space\", draw=\"fix\", check_abort=True) def poisson_disc_sample(size, radius, candidates=100,", "def demo_mode(exp): exp.wait_until(\"space\", draw=\"fix\", check_abort=True) exp.s.fix.color = exp.p.fix_trial_color exp.wait_until(\"space\", draw=[\"fix\", \"targets\"], check_abort=True) all_stims", "sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs = np.linspace(.05, .95, n) m,", "open(out_json_fname, \"w\") as fid: json.dump(exp.p, fid, sort_keys=True, indent=4) # ----------------------------------------------------------------------- # # Demo-related", "intensities max_contrast = np.log10(1 / np.sqrt(p.stim_gratings)) log_contrast = np.zeros(n_pulses) pulse_dist = np.concatenate([ np.full(n,", "default=1, type=float) parser.add_argument(\"--blocks\", default=1, type=int) def create_stimuli(exp): \"\"\"Initialize stimulus objects.\"\"\" # Fixation point", "if exp.p.enforce_fix: exp.sounds.fixbreak.play() exp.flicker(\"fix\") t_info[\"result\"] = \"fixbreak\" t_info[\"offset_cue\"] = exp.clock.getTime() return t_info, p_info", "same value repeats in sequence.\"\"\" s = pd.Series(s) switch = s != s.shift(1)", "else: t_info[\"fixbreaks\"] += 1 flip_time = exp.draw([\"fix\", \"cue\", \"targets\"]) # Record the time", "# Saccade targets targets = 
Points(exp.win, exp.p.target_pos, exp.p.target_radius, exp.p.target_color) # Average of multiple", "Update the trial_info structure trial_info[\"wait_pre_stim\"] = wait_pre_stim trial_llr = (pulse_info .groupby(\"trial\") .sum() .loc[:,", "or val > limits[1] def max_repeat(s): \"\"\"Maximumum number of times the same value", "check_abort=True) all_stims = [\"fix\", \"targets\", \"cue\", \"pattern\"] exp.s.pattern.contrast = 10 ** np.mean(exp.p.dist_means) for", "p_info else: t_info[\"fixbreaks\"] += 1 stims = [\"fix\", \"cue\", \"targets\", \"pattern\"] flip_time =", "= Pattern(exp.win, n=exp.p.stim_gratings, elementTex=exp.p.stim_tex, elementMask=exp.p.stim_mask, sizes=stim_size, sfs=stim_sf, pos=xy_i) patterns.append(pattern) n = len(patterns) qs", "each frame of the stimulus for frame in exp.frame_range(seconds=info.pulse_dur): if not exp.check_fixation(allow_blinks=True): if" ]
[ "= ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID():", "vol from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const import", "})) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs = hub.make_card(config[CONF_NAME], config[CONF_UID]) binary_sensor.register_binary_sensor(rhs,", "<reponame>khzd/pi4home import voluptuous as vol from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as", "'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID):", "cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs =", "import voluptuous as vol from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv", "= 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t,", "cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in", "cv.GenerateID(CONF_RDM6300_ID): 
cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs = hub.make_card(config[CONF_NAME],", "import binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID from", "pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID", "['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor),", "voluptuous as vol from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv from", "as cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES =", "DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({", "import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA", "CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor =", "rdm6300 import pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import", "cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300']", "from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = 
binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor',", "PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for", "vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs", "cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs = hub.make_card(config[CONF_NAME], config[CONF_UID])", "CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID):", "pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES", "as vol from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const", "= binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) }))", "def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield rhs = hub.make_card(config[CONF_NAME], config[CONF_UID]) binary_sensor.register_binary_sensor(rhs, config)", "import pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable", 
"pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID =", "binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def", "from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID", "binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config):", "cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]): yield", "pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor)", "CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor", "get_variable DEPENDENCIES = ['rdm6300'] CONF_RDM6300_ID = 'rdm6300_id' RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA =", "binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const import CONF_NAME, CONF_UID from pi4home.cpp_generator", "import CONF_NAME, CONF_UID from pi4home.cpp_generator import get_variable DEPENDENCIES = ['rdm6300'] 
CONF_RDM6300_ID = 'rdm6300_id'", "= cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub", "cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component) })) def to_code(config): for hub in get_variable(config[CONF_RDM6300_ID]):", "RDM6300BinarySensor = binary_sensor.binary_sensor_ns.class_('RDM6300BinarySensor', binary_sensor.BinarySensor) PLATFORM_SCHEMA = cv.nameable(binary_sensor.BINARY_SENSOR_PLATFORM_SCHEMA.extend({ cv.GenerateID(): cv.declare_variable_id(RDM6300BinarySensor), vol.Required(CONF_UID): cv.uint32_t, cv.GenerateID(CONF_RDM6300_ID): cv.use_variable_id(rdm6300.RDM6300Component)", "from pi4home.components import binary_sensor, rdm6300 import pi4home.config_validation as cv from pi4home.const import CONF_NAME," ]
[ "Auth from .stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\"", "import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None", "Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property def stored(self)", "= \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property def stored(self) -> Stored:", "\"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property def stored(self) -> Stored: return", "from .stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3", "Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property", "from aiocloudflare.commons.auth import Auth from .stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\"", "class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property def", "_endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 = None @property def stored(self) ->", "<gh_stars>1-10 from aiocloudflare.commons.auth import Auth from .stored.stored import Stored class Analytics(Auth): _endpoint1 =", ".stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 = \"storage/analytics\" _endpoint3 =", "aiocloudflare.commons.auth import Auth from .stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2", "import Auth from .stored.stored import Stored class Analytics(Auth): _endpoint1 = \"accounts\" _endpoint2 =", "= \"storage/analytics\" _endpoint3 = None @property def stored(self) -> Stored: return Stored(self._config, self._session)", "_endpoint2 = \"storage/analytics\" _endpoint3 = None @property def stored(self) -> Stored: return Stored(self._config," ]
[ "\"outputs\"]) p = Pool(20) # Basic workflow: # MAP: [list of files to", "a new window\\n\"] # Processes the contents of an XSUM file and returns", "if len(summary) < 30: return None return {\"summary\": summary, \"text\": text} tok =", "= splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip() # Don't accept", "the title as the second sentence, forcing the proper token types. title_enc =", "\"__main__\": # Fetch the news. folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder", "import glob, os from multiprocessing import Pool import random import torch # This", "= [m for m in all_texts if m is not None] print(\"Tokenizing news..\")", "{ \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": #", "\"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are", "map function for processing reviews. It returns a dict: # { 'text' {", "pad_to_max_length=False ) # Push resultants to a simple list and return it return", "dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": # Fetch the news.", "# MAP: [list of files to process] => list_of_news # MAP: [single list", "# MAP: [single list of shuffled news] => map_tokenize_news # REDUCE: [tokenized results]", "random.shuffle(all_news) val_news = all_news[0:2048] test_news = all_news[2048:6144] train_news = all_news[6144:] if not os.path.exists(output_folder):", "of an XSUM file and returns a dict: {'text', 'summary'} def map_read_files(filepath): with", "very bad examples. 
JUNK_HEADER_TEXT = ["Share this with\n", "Email\n", "FaceBook\n", "Facebook\n",
                    "Messenger\n", "Twitter\n", "Pinterest\n", "WhatsApp\n",
                    "LinkedIn\n", "Linkedin\n", "Copy this link\n",
                    "These are external links and will open in a new window\n"]


# Processes the contents of an XSUM file and returns a dict: {'text', 'summary'}
def map_read_files(filepath):
    """Parse one XSum-preprocessed article file.

    The pre-processor emits files delimited by '[XSUM]' markers; after
    splitting on that marker, field 4 is the article summary (introduction)
    and field 6 is the article body.  Social-media boilerplate lines
    (JUNK_HEADER_TEXT) are stripped from the body.

    Args:
        filepath: Path to a single *.data file produced by the XSum
            pre-processor.

    Returns:
        A dict {'summary': str, 'text': str}, or None when the file is
        malformed or the content is too short to be a useful training
        example (body < 1024 chars or summary < 30 chars).
    """
    with open(filepath, encoding="utf-8") as file:
        content = file.read()
    SUMMARY_INDEX = 4
    TEXT_INDEX = 6
    splitted = content.split("[XSUM]")
    # Fix: a file without the expected markers used to raise IndexError and
    # kill the worker; treat it as an unusable example instead.
    if len(splitted) <= TEXT_INDEX:
        return None
    summary = splitted[SUMMARY_INDEX].strip()
    text = splitted[TEXT_INDEX]
    for junk in JUNK_HEADER_TEXT:
        text = text.replace(junk, "").strip()
    # Don't accept content with too small of text content or title content.
    # Often these are very bad examples.
    if len(text) < 1024:
        return None
    if len(summary) < 30:
        return None
    return {"summary": summary, "text": text}
folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\"", "all_news = p.map(map_tokenize_news, all_texts) print(\"Writing news to output file.\") random.shuffle(all_news) val_news = all_news[0:2048]", "open(filepath, encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6", "with following format: # [XSUM]URL[XSUM] # <URL where article originates from> # [XSUM]INTRODUCTION[XSUM]", "with too small of text content or title content. Often these are very", "to a single article. The format of # each file is text, with", "splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT:", "Push resultants to a simple list and return it return { \"text\": torch.tensor(text_enc,", "and returns a dict: {'text', 'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file:", "\"These are external links and will open in a new window\\n\"] # Processes", "\"Linkedin\\n\", \"Copy this link\\n\", \"These are external links and will open in a", "SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text", "each of which corresponds to a single article. 
The format of # each", "in all_texts if m is not None] print(\"Tokenizing news..\") all_news = p.map(map_tokenize_news, all_texts)", "preprocessed by the XSum data processor: # https://github.com/EdinburghNLP/XSum # # The pre-processor generates", "random import torch # This function processes news articles gathered and preprocessed by", "file and returns a dict: {'text', 'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as", "# <Summary of the article> # [XSUM]RESTBODY[XSUM] # <Article text> JUNK_HEADER_TEXT = [\"Share", "this with\\n\", \"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this", "originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the article> # [XSUM]RESTBODY[XSUM] # <Article", "each file is text, with following format: # [XSUM]URL[XSUM] # <URL where article", "token types. title_enc = tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push resultants", "print(\"Tokenizing news..\") all_news = p.map(map_tokenize_news, all_texts) print(\"Writing news to output file.\") random.shuffle(all_news) val_news", "processed[\"text\"] text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title = processed[\"summary\"] #", "from multiprocessing import Pool import random import torch # This function processes news", "torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": # Fetch the", "Basic workflow: # MAP: [list of files to process] => list_of_news # MAP:", "{\"summary\": summary, \"text\": text} tok = XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is a map function", "= all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder, \"train.pt\"])) torch.save(val_news, 
\"/\".join([output_folder, \"val.pt\"])) torch.save(test_news,", "files) all_texts = [m for m in all_texts if m is not None]", "= p.map(map_read_files, files) all_texts = [m for m in all_texts if m is", "files to process] => list_of_news # MAP: [single list of shuffled news] =>", "accept content with too small of text content or title content. Often these", "} if __name__ == \"__main__\": # Fetch the news. folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder)", "= XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is a map function for processing reviews. It returns", "# This function processes news articles gathered and preprocessed by the XSum data", "max_length=None, pad_to_max_length=False ) title = processed[\"summary\"] # Insert the title as the second", "# 'target' { input_ids_as_tensor } } def map_tokenize_news(processed): text = processed[\"text\"] text_enc =", "# each file is text, with following format: # [XSUM]URL[XSUM] # <URL where", "# <URL where article originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the article>", "def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX = 4", "{ input_ids_as_tensor }, # 'target' { input_ids_as_tensor } } def map_tokenize_news(processed): text =", "{ input_ids_as_tensor } } def map_tokenize_news(processed): text = processed[\"text\"] text_enc = tok.encode( text,", "Insert the title as the second sentence, forcing the proper token types. 
title_enc", "os.chdir(folder) files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic", "= glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic workflow: #", "=> list_of_news # MAP: [single list of shuffled news] => map_tokenize_news # REDUCE:", "text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title = processed[\"summary\"] # Insert", "print(\"Writing news to output file.\") random.shuffle(all_news) val_news = all_news[0:2048] test_news = all_news[2048:6144] train_news", "following format: # [XSUM]URL[XSUM] # <URL where article originates from> # [XSUM]INTRODUCTION[XSUM] #", "None] print(\"Tokenizing news..\") all_news = p.map(map_tokenize_news, all_texts) print(\"Writing news to output file.\") random.shuffle(all_news)", "if __name__ == \"__main__\": # Fetch the news. folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files", "bad examples. if len(text) < 1024: return None if len(summary) < 30: return", "open in a new window\\n\"] # Processes the contents of an XSUM file", "[m for m in all_texts if m is not None] print(\"Tokenizing news..\") all_news", "results] print(\"Reading from files..\") all_texts = p.map(map_read_files, files) all_texts = [m for m", "as the second sentence, forcing the proper token types. 
title_enc = tok.encode( title,", "all_news[0:2048] test_news = all_news[2048:6144] train_news = all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder,", "XLNetTokenizer import glob, os from multiprocessing import Pool import random import torch #", "encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted", "glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic workflow: # MAP:", "val_news = all_news[0:2048] test_news = all_news[2048:6144] train_news = all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder)", "= tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title = processed[\"summary\"] # Insert the", "data processor: # https://github.com/EdinburghNLP/XSum # # The pre-processor generates a large number of", "= splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip()", "is not None] print(\"Tokenizing news..\") all_news = p.map(map_tokenize_news, all_texts) print(\"Writing news to output", "format: # [XSUM]URL[XSUM] # <URL where article originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary", "if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder, \"train.pt\"])) torch.save(val_news, \"/\".join([output_folder, \"val.pt\"])) torch.save(test_news, \"/\".join([output_folder, \"test.pt\"]))", "This function processes news articles gathered and preprocessed by the XSum data processor:", "= text.replace(junk, \"\").strip() # Don't accept content with too small of text content", "of shuffled news] => map_tokenize_news # REDUCE: [tokenized results] => [single list of", "from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the article> # [XSUM]RESTBODY[XSUM] # <Article text>", "}, # 'target' { input_ids_as_tensor } } def 
map_tokenize_news(processed): text = processed[\"text\"] text_enc", "os from multiprocessing import Pool import random import torch # This function processes", "news to output file.\") random.shuffle(all_news) val_news = all_news[0:2048] test_news = all_news[2048:6144] train_news =", "which corresponds to a single article. The format of # each file is", "=> [single list of tokenized results] print(\"Reading from files..\") all_texts = p.map(map_read_files, files)", "# REDUCE: [tokenized results] => [single list of tokenized results] print(\"Reading from files..\")", "[\"Share this with\\n\", \"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy", "content with too small of text content or title content. Often these are", "XSum data processor: # https://github.com/EdinburghNLP/XSum # # The pre-processor generates a large number", "[tokenized results] => [single list of tokenized results] print(\"Reading from files..\") all_texts =", "junk in JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip() # Don't accept content with too", "1024: return None if len(summary) < 30: return None return {\"summary\": summary, \"text\":", "small of text content or title content. Often these are very bad examples.", "p.map(map_tokenize_news, all_texts) print(\"Writing news to output file.\") random.shuffle(all_news) val_news = all_news[0:2048] test_news =", "Pool import random import torch # This function processes news articles gathered and", "<Summary of the article> # [XSUM]RESTBODY[XSUM] # <Article text> JUNK_HEADER_TEXT = [\"Share this", "tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push resultants to a simple list", "new window\\n\"] # Processes the contents of an XSUM file and returns a", "generates a large number of files, each of which corresponds to a single", "files, each of which corresponds to a single article. 
The format of #", "map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX", "files..\") all_texts = p.map(map_read_files, files) all_texts = [m for m in all_texts if", "= [\"Share this with\\n\", \"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\",", "the contents of an XSUM file and returns a dict: {'text', 'summary'} def", "transformers import XLNetTokenizer import glob, os from multiprocessing import Pool import random import", "news] => map_tokenize_news # REDUCE: [tokenized results] => [single list of tokenized results]", "to output file.\") random.shuffle(all_news) val_news = all_news[0:2048] test_news = all_news[2048:6144] train_news = all_news[6144:]", "return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__", "# { 'text' { input_ids_as_tensor }, # 'target' { input_ids_as_tensor } } def", "a large number of files, each of which corresponds to a single article.", "files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic workflow:", "a single article. 
The format of # each file is text, with following", "\"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are external", "[list of files to process] => list_of_news # MAP: [single list of shuffled", "\"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) #", "news articles gathered and preprocessed by the XSum data processor: # https://github.com/EdinburghNLP/XSum #", "to process] => list_of_news # MAP: [single list of shuffled news] => map_tokenize_news", "TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for", "XSUM file and returns a dict: {'text', 'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\")", "list and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), }", "< 1024: return None if len(summary) < 30: return None return {\"summary\": summary,", "import random import torch # This function processes news articles gathered and preprocessed", "# Fetch the news. 
folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder =", "return None return {\"summary\": summary, \"text\": text} tok = XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is", "text = processed[\"text\"] text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title =", "simple list and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long),", "content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary", "add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push resultants to a simple list and return", "None return {\"summary\": summary, \"text\": text} tok = XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is a", "summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text = text.replace(junk,", "text.replace(junk, \"\").strip() # Don't accept content with too small of text content or", "text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip() # Don't", "of tokenized results] print(\"Reading from files..\") all_texts = p.map(map_read_files, files) all_texts = [m", "forcing the proper token types. title_enc = tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False )", "the news. 
folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"])", "\"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are external links and", "6 splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in", "'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX =", "return None if len(summary) < 30: return None return {\"summary\": summary, \"text\": text}", "torch # This function processes news articles gathered and preprocessed by the XSum", "external links and will open in a new window\\n\"] # Processes the contents", "= all_news[2048:6144] train_news = all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder, \"train.pt\"])) torch.save(val_news,", "articles gathered and preprocessed by the XSum data processor: # https://github.com/EdinburghNLP/XSum # #", "output_folder = \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic workflow: # MAP: [list", "corresponds to a single article. The format of # each file is text,", "JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip() # Don't accept content with too small of", "\"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": # Fetch the news. 
folder", "map_tokenize_news # REDUCE: [tokenized results] => [single list of tokenized results] print(\"Reading from", "\"text\": text} tok = XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is a map function for processing", "{ 'text' { input_ids_as_tensor }, # 'target' { input_ids_as_tensor } } def map_tokenize_news(processed):", "all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder, \"train.pt\"])) torch.save(val_news, \"/\".join([output_folder, \"val.pt\"])) torch.save(test_news, \"/\".join([output_folder,", "of files, each of which corresponds to a single article. The format of", "the second sentence, forcing the proper token types. title_enc = tok.encode( title, add_special_tokens=False,", "single article. The format of # each file is text, with following format:", "# Insert the title as the second sentence, forcing the proper token types.", "links and will open in a new window\\n\"] # Processes the contents of", "# Don't accept content with too small of text content or title content.", "are very bad examples. 
if len(text) < 1024: return None if len(summary) <", "function processes news articles gathered and preprocessed by the XSum data processor: #", "it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ ==", "if m is not None] print(\"Tokenizing news..\") all_news = p.map(map_tokenize_news, all_texts) print(\"Writing news", "\"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are external links and will open in", "is text, with following format: # [XSUM]URL[XSUM] # <URL where article originates from>", "= processed[\"summary\"] # Insert the title as the second sentence, forcing the proper", "map_tokenize_news(processed): text = processed[\"text\"] text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title", "= tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push resultants to a simple", "# <Article text> JUNK_HEADER_TEXT = [\"Share this with\\n\", \"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\",", "# Basic workflow: # MAP: [list of files to process] => list_of_news #", "examples. if len(text) < 1024: return None if len(summary) < 30: return None", "content or title content. Often these are very bad examples. if len(text) <", "of which corresponds to a single article. The format of # each file", "'target' { input_ids_as_tensor } } def map_tokenize_news(processed): text = processed[\"text\"] text_enc = tok.encode(", "all_texts) print(\"Writing news to output file.\") random.shuffle(all_news) val_news = all_news[0:2048] test_news = all_news[2048:6144]", ") title = processed[\"summary\"] # Insert the title as the second sentence, forcing", "proper token types. 
title_enc = tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push", "} def map_tokenize_news(processed): text = processed[\"text\"] text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False", "splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text = text.replace(junk, \"\").strip() #", "\"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These", "a simple list and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc,", "'text' { input_ids_as_tensor }, # 'target' { input_ids_as_tensor } } def map_tokenize_news(processed): text", "processing reviews. It returns a dict: # { 'text' { input_ids_as_tensor }, #", "__name__ == \"__main__\": # Fetch the news. folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files =", "\"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": # Fetch", "dict: {'text', 'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file: content = file.read()", "\"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are external links and will open", "process] => list_of_news # MAP: [single list of shuffled news] => map_tokenize_news #", "folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p =", "to a simple list and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\":", "file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip()", 
"XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This is a map function for processing reviews. It returns a", "multiprocessing import Pool import random import torch # This function processes news articles", "The pre-processor generates a large number of files, each of which corresponds to", "import XLNetTokenizer import glob, os from multiprocessing import Pool import random import torch", "sentence, forcing the proper token types. title_enc = tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False", "30: return None return {\"summary\": summary, \"text\": text} tok = XLNetTokenizer.from_pretrained(\"xlnet-base-cased\") # This", "= 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text =", "article> # [XSUM]RESTBODY[XSUM] # <Article text> JUNK_HEADER_TEXT = [\"Share this with\\n\", \"Email\\n\", \"FaceBook\\n\",", "from transformers import XLNetTokenizer import glob, os from multiprocessing import Pool import random", "will open in a new window\\n\"] # Processes the contents of an XSUM", "with open(filepath, encoding=\"utf-8\") as file: content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX =", "list_of_news # MAP: [single list of shuffled news] => map_tokenize_news # REDUCE: [tokenized", "where article originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the article> # [XSUM]RESTBODY[XSUM]", "a dict: # { 'text' { input_ids_as_tensor }, # 'target' { input_ids_as_tensor }", "number of files, each of which corresponds to a single article. The format", "MAP: [list of files to process] => list_of_news # MAP: [single list of", "these are very bad examples. if len(text) < 1024: return None if len(summary)", "processed[\"summary\"] # Insert the title as the second sentence, forcing the proper token", "== \"__main__\": # Fetch the news. 
folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\")", "file: content = file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\")", "= Pool(20) # Basic workflow: # MAP: [list of files to process] =>", "= file.read() SUMMARY_INDEX = 4 TEXT_INDEX = 6 splitted = content.split(\"[XSUM]\") summary =", "= content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text", "None if len(summary) < 30: return None return {\"summary\": summary, \"text\": text} tok", "torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\": # Fetch the news. folder =", "\"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\", \"Linkedin\\n\", \"Copy this link\\n\", \"These are external links", "news. folder = \"C:/Users/jbetk/Documents/data/ml/xsum/xsum-extracts-from-downloads\" os.chdir(folder) files = glob.glob(\"*.data\") output_folder = \"/\".join([folder, \"outputs\"]) p", "too small of text content or title content. 
Often these are very bad", "list of shuffled news] => map_tokenize_news # REDUCE: [tokenized results] => [single list", "# Processes the contents of an XSUM file and returns a dict: {'text',", "JUNK_HEADER_TEXT = [\"Share this with\\n\", \"Email\\n\", \"FaceBook\\n\", \"Facebook\\n\", \"Messenger\\n\", \"Twitter\\n\", \"Pinterest\\n\", \"WhatsApp\\n\", \"LinkedIn\\n\",", "returns a dict: # { 'text' { input_ids_as_tensor }, # 'target' { input_ids_as_tensor", "# The pre-processor generates a large number of files, each of which corresponds", "of files to process] => list_of_news # MAP: [single list of shuffled news]", "title_enc = tok.encode( title, add_special_tokens=False, max_length=None, pad_to_max_length=False ) # Push resultants to a", "tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False ) title = processed[\"summary\"] # Insert the title", "MAP: [single list of shuffled news] => map_tokenize_news # REDUCE: [tokenized results] =>", "content.split(\"[XSUM]\") summary = splitted[SUMMARY_INDEX].strip() text = splitted[TEXT_INDEX] for junk in JUNK_HEADER_TEXT: text =", "https://github.com/EdinburghNLP/XSum # # The pre-processor generates a large number of files, each of", "of the article> # [XSUM]RESTBODY[XSUM] # <Article text> JUNK_HEADER_TEXT = [\"Share this with\\n\",", "file is text, with following format: # [XSUM]URL[XSUM] # <URL where article originates", "results] => [single list of tokenized results] print(\"Reading from files..\") all_texts = p.map(map_read_files,", "text, with following format: # [XSUM]URL[XSUM] # <URL where article originates from> #", "returns a dict: {'text', 'summary'} def map_read_files(filepath): with open(filepath, encoding=\"utf-8\") as file: content", "\"\").strip() # Don't accept content with too small of text content or title", "and will open in a new window\\n\"] # Processes the contents of an", "Often these are very bad examples. 
if len(text) < 1024: return None if", "return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if __name__ == \"__main__\":", "and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long), \"target\": torch.tensor(title_enc, dtype=torch.long), } if", "= \"/\".join([folder, \"outputs\"]) p = Pool(20) # Basic workflow: # MAP: [list of", "[XSUM]URL[XSUM] # <URL where article originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the", "test_news = all_news[2048:6144] train_news = all_news[6144:] if not os.path.exists(output_folder): os.makedirs(output_folder) torch.save(train_news, \"/\".join([output_folder, \"train.pt\"]))", "<URL where article originates from> # [XSUM]INTRODUCTION[XSUM] # <Summary of the article> #", "is a map function for processing reviews. It returns a dict: # {", "tokenized results] print(\"Reading from files..\") all_texts = p.map(map_read_files, files) all_texts = [m for", "resultants to a simple list and return it return { \"text\": torch.tensor(text_enc, dtype=torch.long),", "This is a map function for processing reviews. It returns a dict: #", "# https://github.com/EdinburghNLP/XSum # # The pre-processor generates a large number of files, each", "def map_tokenize_news(processed): text = processed[\"text\"] text_enc = tok.encode( text, add_special_tokens=False, max_length=None, pad_to_max_length=False )" ]
[ "urban etc.). Supports the indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project", "indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. ..", "Easily calculate spectral indices (vegetation, water, urban etc.). Supports the indices defined in", "(vegetation, water, urban etc.). Supports the indices defined in the `Awesome Spectral Indices", "etc.). Supports the indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by", "<https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1 \"\"\" from openeo.extra.spectral_indices.spectral_indices import *", "water, urban etc.). Supports the indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_", "the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1 \"\"\"", "indices (vegetation, water, urban etc.). Supports the indices defined in the `Awesome Spectral", "in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1", "Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1 \"\"\" from openeo.extra.spectral_indices.spectral_indices import", "calculate spectral indices (vegetation, water, urban etc.). Supports the indices defined in the", "`Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1 \"\"\" from", "\"\"\" Easily calculate spectral indices (vegetation, water, urban etc.). 
Supports the indices defined", "Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded:: 0.9.1 \"\"\" from openeo.extra.spectral_indices.spectral_indices", "Supports the indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME>", "the indices defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_.", "defined in the `Awesome Spectral Indices <https://awesome-ee-spectral-indices.readthedocs.io/>`_ project by `<NAME> <https://github.com/davemlz>`_. .. versionadded::", "spectral indices (vegetation, water, urban etc.). Supports the indices defined in the `Awesome" ]
[ "import AssignmentSerializer from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import PoolSerializer from .label", "from .assignment import AssignmentSerializer from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import PoolSerializer", ".assignment import AssignmentSerializer from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import PoolSerializer from", "from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import PoolSerializer from .label import LabelSerializer", "AssignmentSerializer from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import PoolSerializer from .label import", "<gh_stars>1-10 from .assignment import AssignmentSerializer from .cidr import CIDRSerializer, RecursiveCIDRSerializer from .pool import" ]
[ "= \"0.1.0\" from .koyeb_nb2 import koyeb_nb2 # from .nb2chan import nb2chan __all__ =", "__version__ = \"0.1.0\" from .koyeb_nb2 import koyeb_nb2 # from .nb2chan import nb2chan __all__", "\"0.1.0\" from .koyeb_nb2 import koyeb_nb2 # from .nb2chan import nb2chan __all__ = (\"koyeb_nb2\",)", "\"\"\"Init.\"\"\" __version__ = \"0.1.0\" from .koyeb_nb2 import koyeb_nb2 # from .nb2chan import nb2chan" ]
[ "0, 1], [-1, -1, 2] ] \"\"\" # time complexity: O(n^2), space complexity:", "] \"\"\" # time complexity: O(n^2), space complexity: O(1) # this is such", "high = len(nums) - 1 while low < high: if nums[i] + nums[low]", "nums = [-1, 0, 1, 2, -1, -4], A solution set is: [", "the discussion area. The idea is to first sort the nums in O(nlogn),", "inspiration from @christopherwu0529 in the discussion area. The idea is to first sort", "[-1, 0, 1, 2, -1, -4], A solution set is: [ [-1, 0,", "or nums[i] != nums[i-1]: low = i + 1 high = len(nums) -", "nums: List[int]) -> List[List[int]]: nums.sort() result = [] i = 0 while i", "[-1, -1, 2] ] \"\"\" # time complexity: O(n^2), space complexity: O(1) #", "triplets in the array which gives the sum of zero. Note: The solution", "i = 0 while i < len(nums) - 2: if nums[i] > 0:", "b + c = 0? Find all unique triplets in the array which", "high and nums[low] == nums[low+1]: low += 1 while low < high and", "a classic question and I got the inspiration from @christopherwu0529 in the discussion", "= [-1, 0, 1, 2, -1, -4], A solution set is: [ [-1,", "+= 1 high -= 1 elif nums[i] + nums[low] + nums[high] < 0:", "== 0 or nums[i] != nums[i-1]: low = i + 1 high =", "low += 1 high -= 1 elif nums[i] + nums[low] + nums[high] <", "number is larger than 0, because there will be no way we can", "< 0: low += 1 else: high -= 1 i += 1 return", "idea is to first sort the nums in O(nlogn), then for each number", "nums[i] + nums[low] + nums[high] < 0: low += 1 else: high -=", "0 while i < len(nums) - 2: if nums[i] > 0: break if", "faster, early stop if the fixed number is larger than 0, because there", "+ nums[high] < 0: low += 1 else: high -= 1 i +=", "+ nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low < high", "Note: The solution set must not contain duplicate triplets. 
Example: Given array nums", "complexity: O(1) # this is such a classic question and I got the", "\"\"\" https://leetcode.com/problems/3sum/ Given an array nums of n integers, are there elements a,", "for each number in this list, two pointers go over the number after", "len(nums) - 2: if nums[i] > 0: break if i == 0 or", "1 low += 1 high -= 1 elif nums[i] + nums[low] + nums[high]", "from @christopherwu0529 in the discussion area. The idea is to first sort the", "elif nums[i] + nums[low] + nums[high] < 0: low += 1 else: high", "over the number after it. # To make it faster, early stop if", "result = [] i = 0 while i < len(nums) - 2: if", "an array nums of n integers, are there elements a, b, c in", "equal to 0. class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result", "List[int]) -> List[List[int]]: nums.sort() result = [] i = 0 while i <", "O(1) # this is such a classic question and I got the inspiration", "[] i = 0 while i < len(nums) - 2: if nums[i] >", "= 0? Find all unique triplets in the array which gives the sum", "if nums[i] > 0: break if i == 0 or nums[i] != nums[i-1]:", "0: break if i == 0 or nums[i] != nums[i-1]: low = i", "is larger than 0, because there will be no way we can make", "number in this list, two pointers go over the number after it. #", "nums of n integers, are there elements a, b, c in nums such", "\"\"\" # time complexity: O(n^2), space complexity: O(1) # this is such a", "which gives the sum of zero. Note: The solution set must not contain", "time complexity: O(n^2), space complexity: O(1) # this is such a classic question", "to first sort the nums in O(nlogn), then for each number in this", "each number in this list, two pointers go over the number after it.", "there elements a, b, c in nums such that a + b +", "no way we can make the sum equal to 0. 
class Solution: def", "n integers, are there elements a, b, c in nums such that a", "nums[high]]) while low < high and nums[low] == nums[low+1]: low += 1 while", "0: low += 1 else: high -= 1 i += 1 return result", "nums such that a + b + c = 0? Find all unique", "2] ] \"\"\" # time complexity: O(n^2), space complexity: O(1) # this is", "@christopherwu0529 in the discussion area. The idea is to first sort the nums", "nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low < high and nums[low] ==", "then for each number in this list, two pointers go over the number", "array nums of n integers, are there elements a, b, c in nums", "while i < len(nums) - 2: if nums[i] > 0: break if i", "fixed number is larger than 0, because there will be no way we", "b, c in nums such that a + b + c = 0?", "the nums in O(nlogn), then for each number in this list, two pointers", "https://leetcode.com/problems/3sum/ Given an array nums of n integers, are there elements a, b,", "in nums such that a + b + c = 0? Find all", "== nums[low+1]: low += 1 while low < high and nums[high] == nums[high-1]:", "class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result = [] i", "sum equal to 0. class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort()", "1 while low < high: if nums[i] + nums[low] + nums[high] == 0:", "-= 1 low += 1 high -= 1 elif nums[i] + nums[low] +", "there will be no way we can make the sum equal to 0.", "== nums[high-1]: high -= 1 low += 1 high -= 1 elif nums[i]", "nums[low] == nums[low+1]: low += 1 while low < high and nums[high] ==", "The solution set must not contain duplicate triplets. Example: Given array nums =", "To make it faster, early stop if the fixed number is larger than", "< len(nums) - 2: if nums[i] > 0: break if i == 0", "set must not contain duplicate triplets. 
Example: Given array nums = [-1, 0,", "def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result = [] i = 0", "i == 0 or nums[i] != nums[i-1]: low = i + 1 high", "the inspiration from @christopherwu0529 in the discussion area. The idea is to first", "nums[i] + nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low <", "low < high and nums[low] == nums[low+1]: low += 1 while low <", "duplicate triplets. Example: Given array nums = [-1, 0, 1, 2, -1, -4],", "- 2: if nums[i] > 0: break if i == 0 or nums[i]", "in the discussion area. The idea is to first sort the nums in", "if i == 0 or nums[i] != nums[i-1]: low = i + 1", "c = 0? Find all unique triplets in the array which gives the", "Given array nums = [-1, 0, 1, 2, -1, -4], A solution set", "nums[i] != nums[i-1]: low = i + 1 high = len(nums) - 1", "and nums[high] == nums[high-1]: high -= 1 low += 1 high -= 1", "Find all unique triplets in the array which gives the sum of zero.", "all unique triplets in the array which gives the sum of zero. Note:", "integers, are there elements a, b, c in nums such that a +", "2, -1, -4], A solution set is: [ [-1, 0, 1], [-1, -1,", "list, two pointers go over the number after it. # To make it", "1 high -= 1 elif nums[i] + nums[low] + nums[high] < 0: low", "it. # To make it faster, early stop if the fixed number is", "way we can make the sum equal to 0. class Solution: def threeSum(self,", "such that a + b + c = 0? Find all unique triplets", "classic question and I got the inspiration from @christopherwu0529 in the discussion area.", "is to first sort the nums in O(nlogn), then for each number in", "because there will be no way we can make the sum equal to", "and nums[low] == nums[low+1]: low += 1 while low < high and nums[high]", "i + 1 high = len(nums) - 1 while low < high: if", "will be no way we can make the sum equal to 0. 
class", "solution set is: [ [-1, 0, 1], [-1, -1, 2] ] \"\"\" #", "I got the inspiration from @christopherwu0529 in the discussion area. The idea is", "in this list, two pointers go over the number after it. # To", "-= 1 elif nums[i] + nums[low] + nums[high] < 0: low += 1", "if nums[i] + nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low", "2: if nums[i] > 0: break if i == 0 or nums[i] !=", "stop if the fixed number is larger than 0, because there will be", "larger than 0, because there will be no way we can make the", "1 while low < high and nums[high] == nums[high-1]: high -= 1 low", "early stop if the fixed number is larger than 0, because there will", "elements a, b, c in nums such that a + b + c", "nums.sort() result = [] i = 0 while i < len(nums) - 2:", "number after it. # To make it faster, early stop if the fixed", "high: if nums[i] + nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while", "it faster, early stop if the fixed number is larger than 0, because", "nums[low+1]: low += 1 while low < high and nums[high] == nums[high-1]: high", "two pointers go over the number after it. # To make it faster,", "break if i == 0 or nums[i] != nums[i-1]: low = i +", "this is such a classic question and I got the inspiration from @christopherwu0529", "of zero. Note: The solution set must not contain duplicate triplets. Example: Given", "solution set must not contain duplicate triplets. 
Example: Given array nums = [-1,", "the fixed number is larger than 0, because there will be no way", "such a classic question and I got the inspiration from @christopherwu0529 in the", "-1, 2] ] \"\"\" # time complexity: O(n^2), space complexity: O(1) # this", "0, because there will be no way we can make the sum equal", "< high and nums[low] == nums[low+1]: low += 1 while low < high", "-> List[List[int]]: nums.sort() result = [] i = 0 while i < len(nums)", "Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result = [] i =", "in O(nlogn), then for each number in this list, two pointers go over", "nums[low] + nums[high] < 0: low += 1 else: high -= 1 i", "0: result.append([nums[i], nums[low], nums[high]]) while low < high and nums[low] == nums[low+1]: low", "while low < high and nums[high] == nums[high-1]: high -= 1 low +=", "nums[i] > 0: break if i == 0 or nums[i] != nums[i-1]: low", "c in nums such that a + b + c = 0? Find", "unique triplets in the array which gives the sum of zero. Note: The", "= [] i = 0 while i < len(nums) - 2: if nums[i]", "nums[i-1]: low = i + 1 high = len(nums) - 1 while low", "can make the sum equal to 0. class Solution: def threeSum(self, nums: List[int])", "make it faster, early stop if the fixed number is larger than 0,", "+= 1 while low < high and nums[high] == nums[high-1]: high -= 1", "contain duplicate triplets. Example: Given array nums = [-1, 0, 1, 2, -1,", "The idea is to first sort the nums in O(nlogn), then for each", "zero. Note: The solution set must not contain duplicate triplets. Example: Given array", "the number after it. # To make it faster, early stop if the", "if the fixed number is larger than 0, because there will be no", "0? Find all unique triplets in the array which gives the sum of", "the sum of zero. Note: The solution set must not contain duplicate triplets.", "+ b + c = 0? Find all unique triplets in the array", "pointers go over the number after it. 
# To make it faster, early", "nums[high-1]: high -= 1 low += 1 high -= 1 elif nums[i] +", "high -= 1 low += 1 high -= 1 elif nums[i] + nums[low]", "1 high = len(nums) - 1 while low < high: if nums[i] +", "1 elif nums[i] + nums[low] + nums[high] < 0: low += 1 else:", "set is: [ [-1, 0, 1], [-1, -1, 2] ] \"\"\" # time", "array nums = [-1, 0, 1, 2, -1, -4], A solution set is:", "and I got the inspiration from @christopherwu0529 in the discussion area. The idea", "nums in O(nlogn), then for each number in this list, two pointers go", "be no way we can make the sum equal to 0. class Solution:", "low = i + 1 high = len(nums) - 1 while low <", "O(nlogn), then for each number in this list, two pointers go over the", "= 0 while i < len(nums) - 2: if nums[i] > 0: break", "# this is such a classic question and I got the inspiration from", "than 0, because there will be no way we can make the sum", "not contain duplicate triplets. Example: Given array nums = [-1, 0, 1, 2,", "gives the sum of zero. Note: The solution set must not contain duplicate", "threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result = [] i = 0 while", "len(nums) - 1 while low < high: if nums[i] + nums[low] + nums[high]", "is: [ [-1, 0, 1], [-1, -1, 2] ] \"\"\" # time complexity:", "while low < high and nums[low] == nums[low+1]: low += 1 while low", "# time complexity: O(n^2), space complexity: O(1) # this is such a classic", "[-1, 0, 1], [-1, -1, 2] ] \"\"\" # time complexity: O(n^2), space", "i < len(nums) - 2: if nums[i] > 0: break if i ==", "-4], A solution set is: [ [-1, 0, 1], [-1, -1, 2] ]", "discussion area. The idea is to first sort the nums in O(nlogn), then", "that a + b + c = 0? 
Find all unique triplets in", "nums[high] < 0: low += 1 else: high -= 1 i += 1", "Given an array nums of n integers, are there elements a, b, c", "first sort the nums in O(nlogn), then for each number in this list,", "O(n^2), space complexity: O(1) # this is such a classic question and I", "-1, -4], A solution set is: [ [-1, 0, 1], [-1, -1, 2]", "question and I got the inspiration from @christopherwu0529 in the discussion area. The", "result.append([nums[i], nums[low], nums[high]]) while low < high and nums[low] == nums[low+1]: low +=", "after it. # To make it faster, early stop if the fixed number", "the sum equal to 0. class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]:", "is such a classic question and I got the inspiration from @christopherwu0529 in", "Example: Given array nums = [-1, 0, 1, 2, -1, -4], A solution", "> 0: break if i == 0 or nums[i] != nums[i-1]: low =", "= i + 1 high = len(nums) - 1 while low < high:", "while low < high: if nums[i] + nums[low] + nums[high] == 0: result.append([nums[i],", "- 1 while low < high: if nums[i] + nums[low] + nums[high] ==", "a + b + c = 0? Find all unique triplets in the", "must not contain duplicate triplets. Example: Given array nums = [-1, 0, 1,", "triplets. Example: Given array nums = [-1, 0, 1, 2, -1, -4], A", "List[List[int]]: nums.sort() result = [] i = 0 while i < len(nums) -", "low < high: if nums[i] + nums[low] + nums[high] == 0: result.append([nums[i], nums[low],", "+ c = 0? Find all unique triplets in the array which gives", "got the inspiration from @christopherwu0529 in the discussion area. The idea is to", "!= nums[i-1]: low = i + 1 high = len(nums) - 1 while", "== 0: result.append([nums[i], nums[low], nums[high]]) while low < high and nums[low] == nums[low+1]:", "+ 1 high = len(nums) - 1 while low < high: if nums[i]", "the array which gives the sum of zero. Note: The solution set must", "0. 
class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result = []", "of n integers, are there elements a, b, c in nums such that", "# To make it faster, early stop if the fixed number is larger", "high and nums[high] == nums[high-1]: high -= 1 low += 1 high -=", "this list, two pointers go over the number after it. # To make", "0, 1, 2, -1, -4], A solution set is: [ [-1, 0, 1],", "sort the nums in O(nlogn), then for each number in this list, two", "sum of zero. Note: The solution set must not contain duplicate triplets. Example:", "to 0. class Solution: def threeSum(self, nums: List[int]) -> List[List[int]]: nums.sort() result =", "+ nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low < high and nums[low]", "+ nums[low] + nums[high] < 0: low += 1 else: high -= 1", "high -= 1 elif nums[i] + nums[low] + nums[high] < 0: low +=", "array which gives the sum of zero. Note: The solution set must not", "space complexity: O(1) # this is such a classic question and I got", "low += 1 while low < high and nums[high] == nums[high-1]: high -=", "go over the number after it. # To make it faster, early stop", "a, b, c in nums such that a + b + c =", "complexity: O(n^2), space complexity: O(1) # this is such a classic question and", "we can make the sum equal to 0. class Solution: def threeSum(self, nums:", "nums[low], nums[high]]) while low < high and nums[low] == nums[low+1]: low += 1", "low < high and nums[high] == nums[high-1]: high -= 1 low += 1", "0 or nums[i] != nums[i-1]: low = i + 1 high = len(nums)", "= len(nums) - 1 while low < high: if nums[i] + nums[low] +", "nums[high] == nums[high-1]: high -= 1 low += 1 high -= 1 elif", "in the array which gives the sum of zero. Note: The solution set", "1, 2, -1, -4], A solution set is: [ [-1, 0, 1], [-1,", "[ [-1, 0, 1], [-1, -1, 2] ] \"\"\" # time complexity: O(n^2),", "area. The idea is to first sort the nums in O(nlogn), then for", "make the sum equal to 0. 
class Solution: def threeSum(self, nums: List[int]) ->", "nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]]) while low < high and", "are there elements a, b, c in nums such that a + b", "1], [-1, -1, 2] ] \"\"\" # time complexity: O(n^2), space complexity: O(1)", "< high and nums[high] == nums[high-1]: high -= 1 low += 1 high", "< high: if nums[i] + nums[low] + nums[high] == 0: result.append([nums[i], nums[low], nums[high]])", "A solution set is: [ [-1, 0, 1], [-1, -1, 2] ] \"\"\"" ]
[ "import interactive_axhline as axhline from .pyplot import interactive_axvline as axvline from .pyplot import", ".pyplot import interactive_hist as hist from .pyplot import interactive_imshow as imshow from .pyplot", "interactive_hist as hist from .pyplot import interactive_imshow as imshow from .pyplot import interactive_plot", "as imshow from .pyplot import interactive_plot as plot from .pyplot import interactive_scatter as", "import interactive_hist as hist from .pyplot import interactive_imshow as imshow from .pyplot import", "axhline from .pyplot import interactive_axvline as axvline from .pyplot import interactive_hist as hist", "import interactive_axvline as axvline from .pyplot import interactive_hist as hist from .pyplot import", "as axvline from .pyplot import interactive_hist as hist from .pyplot import interactive_imshow as", ".pyplot import interactive_title as title from .pyplot import interactive_xlabel as xlabel from .pyplot", "as plot from .pyplot import interactive_scatter as scatter from .pyplot import interactive_title as", ".pyplot import interactive_scatter as scatter from .pyplot import interactive_title as title from .pyplot", ".pyplot import interactive_imshow as imshow from .pyplot import interactive_plot as plot from .pyplot", ".pyplot import interactive_plot as plot from .pyplot import interactive_scatter as scatter from .pyplot", "scatter from .pyplot import interactive_title as title from .pyplot import interactive_xlabel as xlabel", "axvline from .pyplot import interactive_hist as hist from .pyplot import interactive_imshow as imshow", "interactive_axvline as axvline from .pyplot import interactive_hist as hist from .pyplot import interactive_imshow", "interactive_scatter as scatter from .pyplot import interactive_title as title from .pyplot import interactive_xlabel", "as hist from .pyplot import interactive_imshow as imshow from .pyplot import interactive_plot as", "from .pyplot import interactive_scatter as scatter from .pyplot 
import interactive_title as title from", "import interactive_scatter as scatter from .pyplot import interactive_title as title from .pyplot import", "title from .pyplot import interactive_xlabel as xlabel from .pyplot import interactive_ylabel as ylabel", "from .pyplot import interactive_axhline as axhline from .pyplot import interactive_axvline as axvline from", "from .pyplot import interactive_plot as plot from .pyplot import interactive_scatter as scatter from", "hist from .pyplot import interactive_imshow as imshow from .pyplot import interactive_plot as plot", "as title from .pyplot import interactive_xlabel as xlabel from .pyplot import interactive_ylabel as", "from .pyplot import interactive_title as title from .pyplot import interactive_xlabel as xlabel from", "from .pyplot import interactive_hist as hist from .pyplot import interactive_imshow as imshow from", "import interactive_title as title from .pyplot import interactive_xlabel as xlabel from .pyplot import", "as axhline from .pyplot import interactive_axvline as axvline from .pyplot import interactive_hist as", "from .pyplot import interactive_axvline as axvline from .pyplot import interactive_hist as hist from", "interactive_title as title from .pyplot import interactive_xlabel as xlabel from .pyplot import interactive_ylabel", ".pyplot import interactive_axvline as axvline from .pyplot import interactive_hist as hist from .pyplot", "from .pyplot import interactive_imshow as imshow from .pyplot import interactive_plot as plot from", "import interactive_imshow as imshow from .pyplot import interactive_plot as plot from .pyplot import", ".pyplot import interactive_axhline as axhline from .pyplot import interactive_axvline as axvline from .pyplot", "as scatter from .pyplot import interactive_title as title from .pyplot import interactive_xlabel as", "imshow from .pyplot import interactive_plot as plot from .pyplot import interactive_scatter as scatter", "import interactive_plot as plot from .pyplot 
import interactive_scatter as scatter from .pyplot import", "interactive_imshow as imshow from .pyplot import interactive_plot as plot from .pyplot import interactive_scatter", "interactive_axhline as axhline from .pyplot import interactive_axvline as axvline from .pyplot import interactive_hist", "plot from .pyplot import interactive_scatter as scatter from .pyplot import interactive_title as title", "interactive_plot as plot from .pyplot import interactive_scatter as scatter from .pyplot import interactive_title" ]
[ "self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def", "割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth =", "self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成", "= self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で", "select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate", "arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: #", "和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL -", "self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def init(self, problem): self.problem", "range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys", "import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max,", "self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else:", "as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False,", "select_rate self.change_rate = change_rate def init(self, problem): self.problem = problem self.count = 0", "self.bandwidth = bandwidth 
self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def", "return self.harmonys def step(self): # 新しいharmonyを作成 arr = [] for i in range(self.problem.size):", "(random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 #", "< self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if", "= 0 self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda", "bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def init(self, problem):", "harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate:", "* (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i] + bandwidth", "..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ):", "if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth", "continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更", "を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n", "for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def", "problem): self.problem = problem self.count = 0 self.harmonys = [] for _ in", "import math import random from ..algorithm_common import AlgorithmCommon as AC from 
..algorithm_common import", "self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i])", "self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate", "import random from ..algorithm_common import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class", "harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate", "0 self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x:", "def init(self, problem): self.problem = problem self.count = 0 self.harmonys = [] for", "h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: #", "harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore()", "= problem self.count = 0 self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create())", "= [] for i in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal())", "# 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() <", "in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr", "self.count = 0 self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self):", "# 割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth", 
"self.harmonys def step(self): # 新しいharmonyを作成 arr = [] for i in range(self.problem.size): if", "self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth", "(self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i] + bandwidth *", "if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL)", "* (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1", "= change_rate def init(self, problem): self.problem = problem self.count = 0 self.harmonys =", "arr = [] for i in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成", "else: bandwidth = self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else:", "self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random()", "random from ..algorithm_common import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm):", "for i in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue #", "self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n)", "IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max =", "self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i] +", "self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore() < harmony.getScore():", "step(self): # 新しいharmonyを作成 arr = [] 
for i in range(self.problem.size): if random.random() <", "arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え", "= harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate =", "in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return", "import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max", "= self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore() <", "range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr =", "# harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if", "Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth", "和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore())", "+= 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore() < harmony.getScore(): self.harmonys[0] =", "x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr =", "新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray() if random.random() < self.change_rate:", "[] for _ in 
range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1]", "= bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def init(self,", "else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda", "# 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL", "新しいharmonyを作成 arr = [] for i in range(self.problem.size): if random.random() < self.select_rate: #", "= h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony =", "x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr", "self.harmony_max-1)].getArray() if random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定", "1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore() < harmony.getScore(): self.harmonys[0] = harmony", "- self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1)", "random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0, self.harmony_max-1)].getArray()", "def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth =", "self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth *", "return self.harmonys[-1] def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr = []", "): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = 
enable_bandwidth_rate self.select_rate = select_rate", "if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue # harmonyを1つ選択 h_arr = self.harmonys[random.randint(0,", "math import random from ..algorithm_common import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm", "enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate", "class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max", "arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if", "def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def step(self):", "random.random() < self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth =", "def step(self): # 新しいharmonyを作成 arr = [] for i in range(self.problem.size): if random.random()", "getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self): return self.harmonys def step(self): #", "enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def init(self, problem): self.problem = problem", "= self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製", "bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count +=", "change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate =", "i in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 
arr.append(self.problem.randomVal()) continue # harmonyを1つ選択", "bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate =", "n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony", "[] for i in range(self.problem.size): if random.random() < self.select_rate: # 新しく和音を生成 arr.append(self.problem.randomVal()) continue", "# 新しいharmonyを作成 arr = [] for i in range(self.problem.size): if random.random() < self.select_rate:", "bandwidth を指定 bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth", "problem self.count = 0 self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def", "self.change_rate = change_rate def init(self, problem): self.problem = problem self.count = 0 self.harmonys", "_ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return self.harmonys[-1] def getElements(self):", "h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr)", "init(self, problem): self.problem = problem self.count = 0 self.harmonys = [] for _", "# 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x:", "harmony_max self.bandwidth = bandwidth self.enable_bandwidth_rate = enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate", "..algorithm_common import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self,", "= enable_bandwidth_rate self.select_rate = select_rate self.change_rate = change_rate def init(self, problem): self.problem =", "change_rate def init(self, problem): self.problem = problem self.count = 
0 self.harmonys = []", "AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1,", "getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr = [] for i in", "from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3,", "< self.change_rate: # 和音を変更 if self.enable_bandwidth_rate: # 割合で bandwidth を指定 bandwidth = self.bandwidth", "__init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8, change_rate=0.3, ): self.harmony_max = harmony_max self.bandwidth = bandwidth", "bandwidth = self.bandwidth n = h_arr[i] + bandwidth * (random.random()*2-1) arr.append(n) else: #", "bandwidth = self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n =", "self.harmonys = [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore())", "from ..algorithm_common import AlgorithmCommon as AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def", "= [] for _ in range(self.harmony_max): self.harmonys.append(problem.create()) def getMaxElement(self): self.harmonys.sort(key=lambda x: x.getScore()) return", "self.count += 1 # 新しいharmonyが最悪harmonyより評価が高ければ置き換え self.harmonys.sort(key=lambda x: x.getScore()) if self.harmonys[0].getScore() < harmony.getScore(): self.harmonys[0]", "self.problem = problem self.count = 0 self.harmonys = [] for _ in range(self.harmony_max):", "+ bandwidth * (random.random()*2-1) arr.append(n) else: # 和音を複製 arr.append(h_arr[i]) harmony = self.problem.create(arr) self.count", "= self.bandwidth * (self.problem.MAX_VAL - self.problem.MIN_VAL) else: bandwidth = self.bandwidth n = h_arr[i]", "self.select_rate = select_rate self.change_rate = change_rate def init(self, problem): 
self.problem = problem self.count", "= select_rate self.change_rate = change_rate def init(self, problem): self.problem = problem self.count =", "def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr = [] for i", "self.harmonys[-1] def getElements(self): return self.harmonys def step(self): # 新しいharmonyを作成 arr = [] for", "AC from ..algorithm_common import IAlgorithm class Harmony(IAlgorithm): def __init__(self, harmony_max, bandwidth=0.1, enable_bandwidth_rate=False, select_rate=0.8," ]
[ "- datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase):", "- datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today()", "actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual", "(datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self):", "- datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today()", "datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected", "(datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class", "test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self):", "actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def", "datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) 
expected = datetime.date.today() - datetime.timedelta(days=3)", "def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual,", "- datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual", "= date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self):", "- datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected =", "weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual =", "datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() -", "= [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today()", "expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0))", "actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [", "import unittest import autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range())", "as 
date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual,", "def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def", "= datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2)", "self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3,", "class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual,", "expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3,", "= date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual =", "expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), ))", "TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self):", "(datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today()", 
"self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)),", "def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def", ")) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)),", "- datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today() -", "since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)),", "date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3,", "def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected", "expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2)", "= datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected", "datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = 
datetime.date.today()", "datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() -", "import datetime import unittest import autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual", "actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range(", "- datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today()", "expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected", "self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual,", "import autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected =", "= list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today()", "TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected)", "test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual =", "datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): 
actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5)", "datetime import unittest import autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual =", "until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)),", "def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def", "self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today() + datetime.timedelta(days=3,", "actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual", "autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()]", "expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected)", "datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected =", "self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual,", "= [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual,", "= 
datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected =", "= (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected =", "= date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual =", "expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected)", "list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected = [ (datetime.date.today() -", "expected = [ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ]", "date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected)", "date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5)", "test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today() + datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected)", "= date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual =", "= list(date.date_range()) expected = [datetime.date.today()] 
self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today()", "datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today()", "(datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date()", "def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today() + datetime.timedelta(days=3, weeks=2) self.assertEqual(actual,", "date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual", "expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2)", "unittest import autos.utils.date as date class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected", "test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() - datetime.timedelta(days=3, weeks=2) self.assertEqual(actual, expected)", "datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def", "datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self): actual = date.get_past_date(days=3, weeks=2) expected = datetime.date.today() -", "expected = (datetime.date.today() - 
datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3) expected", "- datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected", "datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected =", "[datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() -", "- datetime.timedelta(days=3) self.assertEqual(actual, expected) def test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = datetime.date.today() -", "datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual =", "test_returns_past_5_weeks_ago_date(self): actual = date.get_past_date(weeks=5) expected = datetime.date.today() - datetime.timedelta(weeks=5) self.assertEqual(actual, expected) def test_returns_past_3_days_and_2_weeks_ago_date(self):", "expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)),", "[ (datetime.date.today() - datetime.timedelta(days=1)), (datetime.date.today() - datetime.timedelta(days=2)), (datetime.date.today() - datetime.timedelta(days=3)), ] self.assertEqual(actual, expected)", "actual = date.get_past_date(days=3) expected = datetime.date.today() - datetime.timedelta(days=3) self.assertEqual(actual, expected) def 
test_returns_past_5_weeks_ago_date(self): actual", "self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() -", "def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual", "weeks=2) self.assertEqual(actual, expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today() +", "class TestDateRange(unittest.TestCase): def test_returns_today_date_as_default(self): actual = list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def", "test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self):", "test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() - datetime.timedelta(days=3)), until=(datetime.date.today() - datetime.timedelta(days=1)), )) expected =", "list(date.date_range()) expected = [datetime.date.today()] self.assertEqual(actual, expected) def test_returns_correct_range(self): actual = list(date.date_range( since=(datetime.date.today() -", "date.get_past_date() expected = (datetime.date.today() - datetime.timedelta(days=0)) self.assertEqual(actual, expected) def test_returns_past_3_days_ago_date(self): actual = date.get_past_date(days=3)", "] self.assertEqual(actual, expected) class TestGetPastDate(unittest.TestCase): def test_returns_today_date_by_default(self): actual = date.get_past_date() expected = (datetime.date.today()", "expected) def test_returns_future_date_on_negative_input(self): actual = date.get_past_date(days=-3, weeks=-2) expected = datetime.date.today() + 
datetime.timedelta(days=3, weeks=2)" ]
[ "from ..utils._waits import wait_until class BaseCollection: \"\"\" This class is base for all", "\"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool:", "return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that", "any of WebElement to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also,", "__getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def", "self._load() try: for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection", "(f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}' def", "@property def name(self) -> str: return self._return_locator('name') @property def tag_name(self) -> str: return", "visible on a web page during 'wait' seconds. Returns True if at least", "bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as", "class is base for all PageElement collections. This class is a wrap above", "def tag_name(self) -> str: return self._return_locator('tag name') @property def class_name(self) -> str: return", "and visible during 'wait' seconds. 
Returns True if all elements from collection are", "WebElement: return self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def _load(self): try: self._collection", "def __getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self) -> bool: return bool(self.collection)", "return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement:", "seconds. Returns True if all elements from collection are visible, False otherwise \"\"\"", "DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you can work with this class instance as", "def __repr__(self) -> str: return self._repr_name def __len__(self) -> int: return len(self.collection) def", "from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement", "t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}'", "at least one element from collection is present on a web page during", "t_exc def _return_locator(self, selector_type: str) -> str: if self._selector[0] == selector_type: return self._selector[1]", "list of WebElement and provide lazy load of it. It will wait for", "element from collection is visible on a web page during 'wait' seconds. Returns", "collection are present on the DOM of a page and visible during 'wait'", "collection is visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait:", "import wait_until class BaseCollection: \"\"\" This class is base for all PageElement collections.", "= CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one element from collection", "on the DOM of a page and visible during 'wait' seconds. 
Returns True", "-> str: return self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link text') @property", "load of it. It will wait for any of WebElement to be present", "return self._return_locator('tag name') @property def class_name(self) -> str: return self._return_locator('class name') @property def", "class_name(self) -> str: return self._return_locator('class name') @property def css_selector(self) -> str: return self._return_locator('css", "\"\"\" Check that at least one element from collection is visible on a", "' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self,", "== selector_type: return self._selector[1] return '' @property def selector(self) -> Tuple[str, str]: return", "= base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str]", "\"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver =", "NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\" This class is base for", "self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return", "__bool__(self) -> bool: return bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout", "@property def link_text(self) -> str: return self._return_locator('link text') @property def partial_link_text(self) -> str:", "least one element from collection is visible, False otherwise \"\"\" return wait_until(self._driver, wait,", "all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements from", "self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str = repr_name", "raise NoSuchElementsException( 'no such elements: ' 'Unable to 
locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' %", "or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}'", "% self._selector) from t_exc def _return_locator(self, selector_type: str) -> str: if self._selector[0] ==", "NoSuchElementsException( 'no such elements: ' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector)", "during 'wait' seconds. Returns True if all elements from collection are visible, False", "is base for all PageElement collections. This class is a wrap above list", "It will wait for any of WebElement to be present on the DOM", "base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self, driver:", "xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link text')", "driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str,", "import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config import", "from collection is present on a web page during 'wait' seconds. Returns True", "BaseCollection: \"\"\" This class is base for all PageElement collections. 
This class is", "( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from", "as t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable to locate elements: '", "= None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection:", "visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int =", "-> str: return self._return_locator('tag name') @property def class_name(self) -> str: return self._return_locator('class name')", "= repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector},", "name') @property def css_selector(self) -> str: return self._return_locator('css selector') @property def collection(self) ->", "\"\"\" Check that at least one element from collection is present on a", "selector_type: return self._selector[1] return '' @property def selector(self) -> Tuple[str, str]: return self._selector", "work with this class instance as with basic list. 
For example:: base_collection =", "List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import", "str: return self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link text') @property def", "base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection: print(element.text) first_element =", "'{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type: str) -> str: if self._selector[0]", "def selector(self) -> Tuple[str, str]: return self._selector @property def id(self) -> str: #", "For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection: print(element.text)", "of a page and visible during 'wait' seconds. Returns True if all elements", "base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] =", "WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait", "is visible on a web page during 'wait' seconds. 
Returns True if at", "def __str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str:", "def __len__(self) -> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def", "for any of WebElement to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds.", "return self._repr_name def __len__(self) -> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return", "text') @property def name(self) -> str: return self._return_locator('name') @property def tag_name(self) -> str:", "collection is visible on a web page during 'wait' seconds. Returns True if", "return iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self) -> bool:", "that at least one element from collection is present on a web page", "str) -> str: if self._selector[0] == selector_type: return self._selector[1] return '' @property def", "False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout)", "seconds. Also, you can work with this class instance as with basic list.", "__len__(self) -> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self,", "bool: \"\"\" Check that at least one element from collection is visible on", "'div'}) for element in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) == 50", ") from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from ..exceptions", "Tuple[str, str]: return self._selector @property def id(self) -> str: # pylint: disable=invalid-name return", "from collection is visible on a web page during 'wait' seconds. 
Returns True", "first_element = base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver, repr_name:", "visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from", "repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector}, Collection:", "all elements from collection are present on the DOM of a page and", "WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits", "WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str, str]", "in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int", "-> str: # pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str: return", "str: if self._selector[0] == selector_type: return self._selector[1] return '' @property def selector(self) ->", "css_selector(self) -> str: return self._return_locator('css selector') @property def collection(self) -> List[WebElement]: if not", "def css_selector(self) -> str: return self._return_locator('css selector') @property def collection(self) -> List[WebElement]: if", "elements from collection are present on the DOM of a page and visible", "PageElement collections. This class is a wrap above list of WebElement. Property `collection`", "self._load() return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check", "This class is a wrap above list of WebElement. Property `collection` contains list", "class instance as with basic list. 
For example:: base_collection = BaseCollection(driver, **{'css selector':", "CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable", "'wait' seconds. Returns True if all elements from collection are visible, False otherwise", "visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG", "return self._return_locator('link text') @property def partial_link_text(self) -> str: return self._return_locator('partial link text') @property", "self._collection: List[WebElement] = [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def", "__iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item] def", "selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import", "..config import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until", "def id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) ->", "pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath') @property def", "str: return self._return_locator('css selector') @property def collection(self) -> List[WebElement]: if not self._collection or", "self._return_locator('css selector') @property def collection(self) -> List[WebElement]: if not self._collection or not isinstance(self._collection,", "element from collection is present on a web page during 'wait' seconds. 
Returns", "visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int =", "\"\"\" Check that all elements from collection are present on the DOM of", "otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) ->", "item) -> WebElement: return self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def _load(self):", "def partial_link_text(self) -> str: return self._return_locator('partial link text') @property def name(self) -> str:", "import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located", "of WebElement and provide lazy load of it. It will wait for any", "can work with this class instance as with basic list. For example:: base_collection", "return self._return_locator('partial link text') @property def name(self) -> str: return self._return_locator('name') @property def", "is visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int", "of it. 
It will wait for any of WebElement to be present on", "typing import Iterator, List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException )", "Collection: {self._collection}' def __repr__(self) -> str: return self._repr_name def __len__(self) -> int: return", "return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\"", "from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\" This class", "self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def _load(self): try: self._collection = WebDriverWait(", "str]: return self._selector @property def id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id')", "_return_locator(self, selector_type: str) -> str: if self._selector[0] == selector_type: return self._selector[1] return ''", "def class_name(self) -> str: return self._return_locator('class name') @property def css_selector(self) -> str: return", "try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException(", "base for all PageElement collections. This class is a wrap above list of", "self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self) -> str:", "class BaseCollection: \"\"\" This class is base for all PageElement collections. 
This class", "import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import", "one element from collection is present, False otherwise \"\"\" return wait_until(self._driver, wait, presence_of_all_elements_located(self._selector))", "self._repr_name def __len__(self) -> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection)", "assert len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] = None,", "CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\" This", "selector(self) -> Tuple[str, str]: return self._selector @property def id(self) -> str: # pylint:", "Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self) ->", "Check that at least one element from collection is present on a web", "will wait for any of WebElement to be present on the DOM for", "as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\"", "Also, you can work with this class instance as with basic list. For", "\"\"\" This class is base for all PageElement collections. This class is a", "_load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise", "one element from collection is visible on a web page during 'wait' seconds.", "lazy load of it. 
It will wait for any of WebElement to be", "int = CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements from collection are", "str: return self._return_locator('partial link text') @property def name(self) -> str: return self._return_locator('name') @property", "least one element from collection is present, False otherwise \"\"\" return wait_until(self._driver, wait,", "return bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException", "tag_name(self) -> str: return self._return_locator('tag name') @property def class_name(self) -> str: return self._return_locator('class", "name') @property def class_name(self) -> str: return self._return_locator('class name') @property def css_selector(self) ->", "@property def class_name(self) -> str: return self._return_locator('class name') @property def css_selector(self) -> str:", "Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str = repr_name or", "web page during 'wait' seconds. 
Returns True if at least one element from", "def xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link", "example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection: print(element.text) first_element", "repr_name: Optional[str] = None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str, str] =", "-*- coding: utf-8 -*- from typing import Iterator, List, Optional, Tuple from selenium.common.exceptions", "from t_exc def _return_locator(self, selector_type: str) -> str: if self._selector[0] == selector_type: return", "iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self) -> bool: return", "__init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver = driver self._selector:", "'wait' seconds. Returns True if at least one element from collection is present,", "def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least", "@property def tag_name(self) -> str: return self._return_locator('tag name') @property def class_name(self) -> str:", "self._selector @property def id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id') @property def", "present on the DOM of a page and visible during 'wait' seconds. 
Returns", "TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable to locate elements:", "str: return self._return_locator('class name') @property def css_selector(self) -> str: return self._return_locator('css selector') @property", "Returns True if all elements from collection are visible, False otherwise \"\"\" return", "{self._collection}' def __repr__(self) -> str: return self._repr_name def __len__(self) -> int: return len(self.collection)", "== 50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver:", "'wait' seconds. Returns True if at least one element from collection is visible,", "return self._return_locator('css selector') @property def collection(self) -> List[WebElement]: if not self._collection or not", "if at least one element from collection is present, False otherwise \"\"\" return", "BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection: print(element.text) first_element = base_collection[0] assert", "WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name:", "self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link text') @property def partial_link_text(self) ->", "..utils._waits import wait_until class BaseCollection: \"\"\" This class is base for all PageElement", "WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located )", "provide lazy load of it. It will wait for any of WebElement to", "to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. 
Also, you can work", "self._collection or not isinstance(self._collection, list): self._load() try: for e in self._collection: isinstance(e.location, dict)", "return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return self._repr_name def __len__(self)", "= driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str", "wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check", "is present on a web page during 'wait' seconds. Returns True if at", "-> str: return self._repr_name def __len__(self) -> int: return len(self.collection) def __iter__(self) ->", "is a wrap above list of WebElement. Property `collection` contains list of WebElement", "-> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item] def __bool__(self)", "to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type: str)", "Optional[str] = None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0]", "WebElement and provide lazy load of it. 
It will wait for any of", "List[WebElement] = [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self)", "\"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool:", "-> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item)", "StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions", "..exceptions import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\" This class is", "WebElement. Property `collection` contains list of WebElement and provide lazy load of it.", "def name(self) -> str: return self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag", "name(self) -> str: return self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag name')", "from typing import Iterator, List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException", "-> bool: return bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector))", "wait for any of WebElement to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT`", "def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc:", "visible during 'wait' seconds. Returns True if all elements from collection are visible,", "you can work with this class instance as with basic list. 
For example::", "wait_until class BaseCollection: \"\"\" This class is base for all PageElement collections. This", "str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str: return f'Selector:", "def __init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver = driver", "all PageElement collections. This class is a wrap above list of WebElement. Property", "locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type: str) ->", "text') @property def partial_link_text(self) -> str: return self._return_locator('partial link text') @property def name(self)", "StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\"", "__repr__(self) -> str: return self._repr_name def __len__(self) -> int: return len(self.collection) def __iter__(self)", "@property def id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self)", "e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait:", "def __bool__(self) -> bool: return bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver,", "page and visible during 'wait' seconds. Returns True if all elements from collection", "str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return self._repr_name def", "@property def collection(self) -> List[WebElement]: if not self._collection or not isinstance(self._collection, list): self._load()", "seconds. 
Returns True if at least one element from collection is present, False", "from collection is visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self,", "visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at", "from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located,", "one element from collection is present on a web page during 'wait' seconds.", "@property def partial_link_text(self) -> str: return self._return_locator('partial link text') @property def name(self) ->", "self._return_locator('class name') @property def css_selector(self) -> str: return self._return_locator('css selector') @property def collection(self)", "# -*- coding: utf-8 -*- from typing import Iterator, List, Optional, Tuple from", "print(element.text) first_element = base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver,", "return self._selector @property def id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id') @property", "class is a wrap above list of WebElement. 
Property `collection` contains list of", "self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements: '", "if all elements from collection are visible, False otherwise \"\"\" return wait_until(self._driver, wait,", "any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one", "bool: \"\"\" Check that at least one element from collection is present on", "Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver", "a page and visible during 'wait' seconds. Returns True if all elements from", "for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self,", "int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one element from", "`SHAWL_LAZY_LOAD_TIMEOUT` seconds. 
Also, you can work with this class instance as with basic", "@property def selector(self) -> Tuple[str, str]: return self._selector @property def id(self) -> str:", "-> str: return self._return_locator('partial link text') @property def name(self) -> str: return self._return_locator('name')", "Iterator, List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver", "selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config", "link_text(self) -> str: return self._return_locator('link text') @property def partial_link_text(self) -> str: return self._return_locator('partial", "str: return self._return_locator('tag name') @property def class_name(self) -> str: return self._return_locator('class name') @property", "self._selector[0] == selector_type: return self._selector[1] return '' @property def selector(self) -> Tuple[str, str]:", "List[WebElement]: if not self._collection or not isinstance(self._collection, list): self._load() try: for e in", "elements: ' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def", "def link_text(self) -> str: return self._return_locator('link text') @property def partial_link_text(self) -> str: return", "partial_link_text(self) -> str: return self._return_locator('partial link text') @property def name(self) -> str: return", "**locators): self._driver: WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] =", "' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type: str) -> str: if", "-> str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return 
self._repr_name", "selector_type: str) -> str: if self._selector[0] == selector_type: return self._selector[1] return '' @property", "self._return_locator('tag name') @property def class_name(self) -> str: return self._return_locator('class name') @property def css_selector(self)", "at least one element from collection is visible, False otherwise \"\"\" return wait_until(self._driver,", "link text') @property def name(self) -> str: return self._return_locator('name') @property def tag_name(self) ->", "from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from", "list): self._load() try: for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return", "'no such elements: ' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from", "= BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection: print(element.text) first_element = base_collection[0]", "for element in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) == 50 \"\"\"", "`collection` contains list of WebElement and provide lazy load of it. It will", "-> List[WebElement]: if not self._collection or not isinstance(self._collection, list): self._load() try: for e", "self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at", "len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement: return", "for all PageElement collections. 
This class is a wrap above list of WebElement.", "'' @property def selector(self) -> Tuple[str, str]: return self._selector @property def id(self) ->", "elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type: str) -> str:", "disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self)", "collections. This class is a wrap above list of WebElement. Property `collection` contains", "elements from collection are visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def", "above list of WebElement. Property `collection` contains list of WebElement and provide lazy", "-> str: return self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag name') @property", "True if at least one element from collection is present, False otherwise \"\"\"", "self._selector[1] return '' @property def selector(self) -> Tuple[str, str]: return self._selector @property def", "selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located,", "' f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self)", "self._return_locator('partial link text') @property def name(self) -> str: return self._return_locator('name') @property def tag_name(self)", "import Iterator, List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from", "except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) -> bool:", "list(locators.items())[0] self._collection: 
List[WebElement] = [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}')", "if self._selector[0] == selector_type: return self._selector[1] return '' @property def selector(self) -> Tuple[str,", "isinstance(self._collection, list): self._load() try: for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load()", "wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that", "None, **locators): self._driver: WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement]", "collection(self) -> List[WebElement]: if not self._collection or not isinstance(self._collection, list): self._load() try: for", "return self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def _load(self): try: self._collection =", "len(base_collection) == 50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators):", "self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no", ") from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import (", "f'{self._selector}') def __str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) ->", "isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout)", "Check that at least one element from collection is visible on a web", "-*- from typing import Iterator, List, Optional, Tuple from selenium.common.exceptions import ( StaleElementReferenceException,", "list. 
For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element in base_collection:", "wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements from collection", "{self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return self._repr_name def __len__(self) -> int:", "driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str =", "self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag name') @property def class_name(self) ->", "# pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath') @property", "Property `collection` contains list of WebElement and provide lazy load of it. It", "import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from", "of WebElement. Property `collection` contains list of WebElement and provide lazy load of", "f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return self._repr_name def __len__(self) ->", "one element from collection is visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector))", "with this class instance as with basic list. 
For example:: base_collection = BaseCollection(driver,", "[] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) -> str:", "element in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) == 50 \"\"\" def", "def _return_locator(self, selector_type: str) -> str: if self._selector[0] == selector_type: return self._selector[1] return", "all elements from collection are visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector))", "are visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int", "that at least one element from collection is visible on a web page", "try: for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def", "from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from ..exceptions import", "selector') @property def collection(self) -> List[WebElement]: if not self._collection or not isinstance(self._collection, list):", "selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait", "return self._selector[1] return '' @property def selector(self) -> Tuple[str, str]: return self._selector @property", "str: # pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath')", "of WebElement to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you", "it. 
It will wait for any of WebElement to be present on the", "return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\"", "str: return self._repr_name def __len__(self) -> int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]:", "CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one element from collection is", "are present on the DOM of a page and visible during 'wait' seconds.", "instance as with basic list. For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'})", "return self._return_locator('xpath') @property def link_text(self) -> str: return self._return_locator('link text') @property def partial_link_text(self)", "-> WebElement: return self.collection[item] def __bool__(self) -> bool: return bool(self.collection) def _load(self): try:", "-> str: if self._selector[0] == selector_type: return self._selector[1] return '' @property def selector(self)", "True if all elements from collection are visible, False otherwise \"\"\" return wait_until(self._driver,", "wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that", "from selenium.webdriver.support.expected_conditions import ( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from", "-> str: return self._return_locator('class name') @property def css_selector(self) -> str: return self._return_locator('css selector')", "def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) -> WebElement: return self.collection[item]", "any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one", "-> str: return self._return_locator('css selector') @property def collection(self) -> 
List[WebElement]: if not self._collection", "True if at least one element from collection is visible, False otherwise \"\"\"", "TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement from selenium.webdriver.support.expected_conditions import", "utf-8 -*- from typing import Iterator, List, Optional, Tuple from selenium.common.exceptions import (", "wrap above list of WebElement. Property `collection` contains list of WebElement and provide", "present on a web page during 'wait' seconds. Returns True if at least", "not self._collection or not isinstance(self._collection, list): self._load() try: for e in self._collection: isinstance(e.location,", "False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout)", "page during 'wait' seconds. Returns True if at least one element from collection", "basic list. For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element in", "-> str: return self._return_locator('link text') @property def partial_link_text(self) -> str: return self._return_locator('partial link", "wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check", "@property def xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self) -> str: return", "presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as", "on a web page during 'wait' seconds. Returns True if at least one", "contains list of WebElement and provide lazy load of it. 
It will wait", "= list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: '", "and provide lazy load of it. It will wait for any of WebElement", "This class is base for all PageElement collections. This class is a wrap", "-> bool: \"\"\" Check that at least one element from collection is present", "list of WebElement. Property `collection` contains list of WebElement and provide lazy load", "such elements: ' 'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc", "self._selector) from t_exc def _return_locator(self, selector_type: str) -> str: if self._selector[0] == selector_type:", "def collection(self) -> List[WebElement]: if not self._collection or not isinstance(self._collection, list): self._load() try:", "SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection:", "least one element from collection is present on a web page during 'wait'", "import NoSuchElementsException from ..utils._waits import wait_until class BaseCollection: \"\"\" This class is base", "WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements:", "Returns True if at least one element from collection is visible, False otherwise", "self._collection: isinstance(e.location, dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int =", "the DOM of a page and visible during 'wait' seconds. 
Returns True if", "**{'css selector': 'div'}) for element in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection)", "visibility_of_any_elements_located(self._selector)) def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that all", ").until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable to", "bool: \"\"\" Check that all elements from collection are present on the DOM", "in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) == 50 \"\"\" def __init__(self,", "= [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}: ' f'{self._selector}') def __str__(self) ->", "a web page during 'wait' seconds. Returns True if at least one element", "a wrap above list of WebElement. Property `collection` contains list of WebElement and", "return self._return_locator('id') @property def xpath(self) -> str: return self._return_locator('xpath') @property def link_text(self) ->", "at least one element from collection is visible on a web page during", "collection is present, False otherwise \"\"\" return wait_until(self._driver, wait, presence_of_all_elements_located(self._selector)) __all__ = ['BaseCollection']", "return '' @property def selector(self) -> Tuple[str, str]: return self._selector @property def id(self)", "not isinstance(self._collection, list): self._load() try: for e in self._collection: isinstance(e.location, dict) except StaleElementReferenceException:", "DOM of a page and visible during 'wait' seconds. 
Returns True if all", "dict) except StaleElementReferenceException: self._load() return self._collection def any_is_visible(self, wait: int = CONFIG.wait_timeout) ->", "= WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except TimeoutException as t_exc: raise NoSuchElementsException( 'no such", "from ..config import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import", "present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you can work with this", "collection is present on a web page during 'wait' seconds. Returns True if", "seconds. Returns True if at least one element from collection is visible, False", "that all elements from collection are present on the DOM of a page", "-> bool: \"\"\" Check that at least one element from collection is visible", "Tuple from selenium.common.exceptions import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from", "the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you can work with this class instance", "on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. 
Also, you can work with this class", "at least one element from collection is present, False otherwise \"\"\" return wait_until(self._driver,", "collection are visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait:", "Check that all elements from collection are present on the DOM of a", "least one element from collection is visible on a web page during 'wait'", "bool: return bool(self.collection) def _load(self): try: self._collection = WebDriverWait( self._driver, CONFIG.lazy_load_timeout ).until(presence_of_all_elements_located(self._selector)) except", "def all_are_visible(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements", "if at least one element from collection is visible, False otherwise \"\"\" return", "CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements from collection are present on", "otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self, wait: int = CONFIG.wait_timeout) ->", "element from collection is visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_any_elements_located(self._selector)) def", "return self._return_locator('class name') @property def css_selector(self) -> str: return self._return_locator('css selector') @property def", "be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you can work with", "import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException from ..utils._waits import wait_until class", "str] = list(locators.items())[0] self._collection: List[WebElement] = [] self._repr_name: str = repr_name or (f'{self.__class__.__name__}:", "str: return self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag name') @property def", "this class instance as with basic list. 
For example:: base_collection = BaseCollection(driver, **{'css", "import ( StaleElementReferenceException, TimeoutException ) from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.remote.webelement import WebElement", "wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least one element", "'Unable to locate elements: ' '{\"method\":\"%s\",\"selector\":\"%s\"}' % self._selector) from t_exc def _return_locator(self, selector_type:", "self._return_locator('link text') @property def partial_link_text(self) -> str: return self._return_locator('partial link text') @property def", "WebElement to be present on the DOM for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. Also, you can", "id(self) -> str: # pylint: disable=invalid-name return self._return_locator('id') @property def xpath(self) -> str:", "50 \"\"\" def __init__(self, driver: WebDriver, repr_name: Optional[str] = None, **locators): self._driver: WebDriver", "self._driver: WebDriver = driver self._selector: Tuple[str, str] = list(locators.items())[0] self._collection: List[WebElement] = []", "selector': 'div'}) for element in base_collection: print(element.text) first_element = base_collection[0] assert len(base_collection) ==", "from collection is present, False otherwise \"\"\" return wait_until(self._driver, wait, presence_of_all_elements_located(self._selector)) __all__ =", "if not self._collection or not isinstance(self._collection, list): self._load() try: for e in self._collection:", "from collection are present on the DOM of a page and visible during", "return self._return_locator('name') @property def tag_name(self) -> str: return self._return_locator('tag name') @property def class_name(self)", "except TimeoutException as t_exc: raise NoSuchElementsException( 'no such elements: ' 'Unable to locate", "for `SHAWL_LAZY_LOAD_TIMEOUT` seconds. 
Also, you can work with this class instance as with", "__str__(self) -> str: return f'Selector: {self._selector}, Collection: {self._collection}' def __repr__(self) -> str: return", "str: return self._return_locator('link text') @property def partial_link_text(self) -> str: return self._return_locator('partial link text')", "@property def css_selector(self) -> str: return self._return_locator('css selector') @property def collection(self) -> List[WebElement]:", "from collection are visible, False otherwise \"\"\" return wait_until(self._driver, wait, visibility_of_all_elements_located(self._selector)) def any_is_present(self,", "= CONFIG.wait_timeout) -> bool: \"\"\" Check that all elements from collection are present", "or not isinstance(self._collection, list): self._load() try: for e in self._collection: isinstance(e.location, dict) except", "-> Tuple[str, str]: return self._selector @property def id(self) -> str: # pylint: disable=invalid-name", "coding: utf-8 -*- from typing import Iterator, List, Optional, Tuple from selenium.common.exceptions import", "during 'wait' seconds. 
Returns True if at least one element from collection is", "-> bool: \"\"\" Check that all elements from collection are present on the", "Returns True if at least one element from collection is present, False otherwise", "element from collection is present, False otherwise \"\"\" return wait_until(self._driver, wait, presence_of_all_elements_located(self._selector)) __all__", "def any_is_present(self, wait: int = CONFIG.wait_timeout) -> bool: \"\"\" Check that at least", "selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG as CONFIG from ..exceptions import NoSuchElementsException", "int: return len(self.collection) def __iter__(self) -> Iterator[WebElement]: return iter(self.collection) def __getitem__(self, item) ->", "( presence_of_all_elements_located, visibility_of_all_elements_located, visibility_of_any_elements_located ) from selenium.webdriver.support.wait import WebDriverWait from ..config import SHAWL_CONFIG", "with basic list. For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for element", "as with basic list. For example:: base_collection = BaseCollection(driver, **{'css selector': 'div'}) for" ]
[ "\"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2))", "input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using", "infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input file # Open", "the input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3)", "\") line1 = infile.readline() line2 = infile.readline() line3 = infile.readline() line4 = infile.readline()", "= open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2 = infile.readline()", "infile.read(10) print(repr(s2)) infile.close() # Close the input file # Open file for input", "= infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close the input file", "for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close() #", "open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close() # Close the input file", "print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input file # Open file for", "print(\"(1) Using read(): \") print(infile.read()) infile.close() # Close the input file # Open", "print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input file # Open file", "read(): \") print(infile.read()) infile.close() # Close the input file # Open file for", "infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input", "open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2 = infile.readline() line3", "print(infile.read()) infile.close() # Close the input file # Open file for input infile", "Open file for input infile = 
open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1", "readline(): \") line1 = infile.readline() line2 = infile.readline() line3 = infile.readline() line4 =", "input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close", "file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 =", "Using read(number): \") s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() #", "Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read())", "print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close the input file # Open", "the input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2)", "infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2 =", "infile.readline() line2 = infile.readline() line3 = infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3))", "\"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close() # Close the input file #", "file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close()", "# Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \")", "for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4)", "# Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \")", "Close the input file # Open file for input infile = open(\"Presidents.txt\", \"r\")", "Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines())", "open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1) s2 = 
infile.read(10)", "# Close the input file # Open file for input infile = open(\"Presidents.txt\",", "file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline():", "= open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close the input", "s2 = infile.read(10) print(repr(s2)) infile.close() # Close the input file # Open file", "input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using", "Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1", "line1 = infile.readline() line2 = infile.readline() line3 = infile.readline() line4 = infile.readline() print(repr(line1))", "\") s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close the", "= infile.read(10) print(repr(s2)) infile.close() # Close the input file # Open file for", "# Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \")", "print(repr(line4)) infile.close() # Close the input file # Open file for input infile", "print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close the input file main() #", "s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close the input", "print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close()", "= infile.readline() line2 = infile.readline() line3 = infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2))", "print(infile.readlines()) infile.close() # Close the input file main() # Call the main function", "\") print(infile.readlines()) infile.close() # Close the input file main() # Call the main", "input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1)", "input infile = open(\"Presidents.txt\", \"r\") print(\"(1) 
Using read(): \") print(infile.read()) infile.close() # Close", "file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 =", "the input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4)", "for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline()", "= infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the", "\"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2 = infile.readline() line3 =", "= open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1) s2 =", "print(repr(line3)) print(repr(line4)) infile.close() # Close the input file # Open file for input", "Using readlines(): \") print(infile.readlines()) infile.close() # Close the input file main() # Call", "infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close the", "= open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close() # Close the input", "infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close() # Close the", "def main(): # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using", "= infile.readline() line3 = infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close()", "Using read(): \") print(infile.read()) infile.close() # Close the input file # Open file", "print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2 = infile.readline() line3 = infile.readline()", "read(number): \") s1 = infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close", "line2 = infile.readline() line3 = infile.readline() line4 = infile.readline() 
print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4))", "<gh_stars>0 def main(): # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1)", "infile.readline() line3 = infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() #", "line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input file", "file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read(): \") print(infile.read()) infile.close()", "file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines():", "print(repr(s2)) infile.close() # Close the input file # Open file for input infile", "infile.close() # Close the input file # Open file for input infile =", "file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number):", "input file # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using", "readlines(): \") print(infile.readlines()) infile.close() # Close the input file main() # Call the", "open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close the input file", "line3 = infile.readline() line4 = infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close", "main(): # Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"(1) Using read():", "\") print(infile.read()) infile.close() # Close the input file # Open file for input", "\"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() # Close the input file main()", "input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): \") line1 = infile.readline() line2", "# Open file for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(3) Using readline(): 
\")", "= infile.readline() print(repr(line1)) print(repr(line2)) print(repr(line3)) print(repr(line4)) infile.close() # Close the input file #", "for input infile = open(\"Presidents.txt\", \"r\") print(\"\\n(4) Using readlines(): \") print(infile.readlines()) infile.close() #", "infile.read(4) print(s1) s2 = infile.read(10) print(repr(s2)) infile.close() # Close the input file #", "infile = open(\"Presidents.txt\", \"r\") print(\"\\n(2) Using read(number): \") s1 = infile.read(4) print(s1) s2", "Using readline(): \") line1 = infile.readline() line2 = infile.readline() line3 = infile.readline() line4" ]
[ "if errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def", "%s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending", "defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if", "post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if", "info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is", "None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status = None def _background_run(self, target,", "uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if", "time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script()", "settings, printer, plugin, plugin_manager, identifier, logger): self._settings = settings self._printer = printer self._plugin", "self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def 
_validate_firmware_file(self, file_path): raise", "= self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return", "self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as configfile: for line in", "dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else None,", "result def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False)", "script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script()", "import FlasherError import time import flask import requests import tempfile import os import", "[line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def", "def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script:", "for f in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with", "self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors =", "_wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") 
time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def", "self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result", "event_name): if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status) self._plugin_manager.send_plugin_message(self._identifier, data) def send_initial_state(self):", "download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp:", "_handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs, files in", "_push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\")", "commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script", "None self._firmware_version = None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script = False", "it...\") with open(os.path.join(root, f), \"r\") as version_file: for line in version_file: version =", "GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script =", "self._plugin_manager = plugin_manager self._identifier = identifier self._logger = logger self._firmware = None 
self._firmware_version", "is not None else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data", "def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\")", "call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author,", "f in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root,", "in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0]", "f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as configfile:", "pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\")", "import re from threading import Thread class BaseFlasher: def __init__(self, settings, printer, plugin,", "= printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger =", "script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported", "False self._flash_status = None def _background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start()", "re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found 
SHORT_BUILD_VERSION : %s\"", "args=None): thread = Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script()", "version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found", "in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash", "False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the", "self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\")", "= temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result", "self._printer = printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger", "not None else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data =", "break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\")", "self._firmware_version = None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status", "break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through", "\"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as configfile: for line", "through websocket\") 
self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not", "result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"])", "as version_file: for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if", "pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands)", "+\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version)", "from .flasher_error import FlasherError import time import flask import requests import tempfile import", "for root, dirs, files in os.walk(self._firmware): for f in files: if f ==", "= author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined", "self._logger = logger self._firmware = None self._firmware_version = None self._firmware_author = None self._firmware_upload_time", "self._firmware_upload_time = None self._should_run_post_script = False self._flash_status = None def _background_run(self, target, args=None):", "if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()]", "self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay())", "by the 
user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors:", "re from threading import Thread class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager,", "author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self):", "= None def _background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start() return thread", ": %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self):", "raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for", "= flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors", "if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as", "downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def", "import Thread class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings", "self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\")", "%s\" % self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with", "as temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() 
os.remove(temp_path)", "def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands =", "self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode", "self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self):", "requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors", "_validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script =", "None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def", "else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def", "line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author =", "firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status)", "temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result =", "self._should_run_post_script: self._run_post_flash_script() 
self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def upload(self):", "version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else None, firmware=self._firmware ))", "function call.\") def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for f in", "temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return", "self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h,", "for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self):", "dirs, files in os.walk(self._firmware): for f in files: if f == \"Version.h\": self._logger.debug(\"Found", ".flasher_error import FlasherError import time import flask import requests import tempfile import os", "= re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION :", "user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return", "delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script:", "author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR", "self._firmware_author) 
break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info", "firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path", "self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path)", "firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self):", "for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version", "% self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root,", "__init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings = settings self._printer = printer", "for line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author", "+SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" %", "target, args=None): thread = Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script =", "logger): self._settings = settings self._printer = printer self._plugin = plugin self._plugin_manager = plugin_manager", "self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f ==", "handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() 
self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\")", "if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def", "self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded", "errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r =", "errors: self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\")", "def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function", "opening it...\") with open(os.path.join(root, f), \"r\") as version_file: for line in version_file: version", "self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path)", "self._plugin = plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger = logger self._firmware", "= plugin_manager self._identifier = identifier self._logger = logger self._firmware = None self._firmware_version =", "firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware):", "self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") 
time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self):", "SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening", "self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if", "threading import Thread class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger):", "import requests import tempfile import os import re from threading import Thread class", "in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f),", "None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r", "raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict(", "_background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script", "= self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise", "as configfile: for line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if", "def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings = settings self._printer =", "requests import tempfile import os import re from threading import Thread class 
BaseFlasher:", "f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as version_file:", "self._firmware = None self._firmware_version = None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script", "post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands)", "self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported", "FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self):", "plugin, plugin_manager, identifier, logger): self._settings = settings self._printer = printer self._plugin = plugin", "self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()]", "call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()]", "self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash", "plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger = logger self._firmware = None", "= self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\") r = 
requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving", "= self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info()", "version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION", "= Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script:", "self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else", "self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as version_file: for line in", "commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def", "import time import flask import requests import tempfile import os import re from", "def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as", "self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\")", "self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content)", "configfile: for line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR 
+\"([^\"]*)\"', line) if author:", "self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status = None def", "STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def", "identifier self._logger = logger self._firmware = None self._firmware_version = None self._firmware_author = None", "in os.walk(self._firmware): for f in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening", "pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in", "upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else None, firmware=self._firmware )) def _push_flash_status(self,", "self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self,", "line) if version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break", "= None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status =", "call.\") def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for f in files:", "files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\")", "def _push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status) self._plugin_manager.send_plugin_message(self._identifier, data)", "f), \"r\") as version_file: for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"',", "printer, plugin, 
plugin_manager, identifier, logger): self._settings = settings self._printer = printer self._plugin =", "self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\")", "return result def download(self): self._logger.debug(\"Downloading firmware...\") r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with", "function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self): raise", "\"r\") as version_file: for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line)", "= requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name", "defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function", "tempfile import os import re from threading import Thread class BaseFlasher: def __init__(self,", "Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running", "= False self._flash_status = None def _background_run(self, target, args=None): thread = Thread(target=target, args=args)", "if self._firmware_upload_time is not None else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if", "temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None,", "in 
post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay())", "open(os.path.join(root, f), \"r\") as configfile: for line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR", "self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line in", "+ self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result =", "os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return", "return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root,", "version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found", "time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands", "def _background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self):", "self._settings = settings self._printer = printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier", "version_file: for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version:", "return None, errors result = 
self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading firmware...\")", "None else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data = dict(", "uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None,", "return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script", "errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self,", "raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def", "result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path):", "_firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier,", "configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found", "flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result", "[line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) 
else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting", "= plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger = logger self._firmware =", "post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No", "self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise", "self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing", "+STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" %", "delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script)", "time import flask import requests import tempfile import os import re from threading", "class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings = settings", "def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\")", "pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\")", "args=args) thread.start() return thread def _run_pre_flash_script(self): 
pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash", "def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script)", "self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for", "line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION +\"([^\"]*)\"', line) if version: self._firmware_version =", "thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\")", "if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded", "raise FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path =", "= False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by", "author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function", "= logger self._firmware = None self._firmware_version = None self._firmware_author = None self._firmware_upload_time =", "plugin_manager, identifier, logger): self._settings = settings self._printer = printer self._plugin = plugin self._plugin_manager", "identifier, logger): self._settings = settings self._printer = printer self._plugin = plugin self._plugin_manager =", "firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( 
type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time", "firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if", "self._should_run_post_script = False self._flash_status = None def _background_run(self, target, args=None): thread = Thread(target=target,", "function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" +", "script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No", "None, firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name )", "check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path", "Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as version_file: for line in version_file:", "f), \"r\") as configfile: for line in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"',", "% self._firmware_author) break def _firmware_info_event_name(self): raise FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware", "_wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running", "with open(os.path.join(root, f), \"r\") as version_file: for line in version_file: version = re.findall(r'#define", "script\") 
self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script", "post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for", "thread = Thread(target=target, args=args) thread.start() return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if", "def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs, files", "self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f),", "= None self._should_run_post_script = False self._flash_status = None def _background_run(self, target, args=None): thread", "errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self):", "else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name", "logger self._firmware = None self._firmware_version = None self._firmware_author = None self._firmware_upload_time = None", "author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else None, firmware=self._firmware )) def", "type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None else None, firmware=self._firmware", "self._firmware_upload_time is not None else None, firmware=self._firmware )) def _push_flash_status(self, event_name): if 
self._flash_status:", "= version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f == \"Configuration.h\":", "the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info()", "tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info()", "flask import requests import tempfile import os import re from threading import Thread", "os import re from threading import Thread class BaseFlasher: def __init__(self, settings, printer,", "= None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status = None def _background_run(self,", "FlasherError import time import flask import requests import tempfile import os import re", "FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for f", "Thread class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings =", "errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info()", "if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def", "= re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR :", "with open(os.path.join(root, f), \"r\") as configfile: for line in configfile: author = re.findall(r'#define", "def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def 
upload(self): self._logger.debug(\"Firmware uploaded by the user\")", "plugin_manager self._identifier = identifier self._logger = logger self._firmware = None self._firmware_version = None", "else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def _validate_firmware_file(self, file_path):", "def _find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for f in files: if", "== \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as configfile: for", "= self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line", "import flask import requests import tempfile import os import re from threading import", "_push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status) self._plugin_manager.send_plugin_message(self._identifier, data) def", "_find_firmware_info(self): for root, dirs, files in os.walk(self._firmware): for f in files: if f", "if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line", "pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands =", "def _validate_firmware_file(self, file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script", "os.walk(self._firmware): for f in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\")", "opening it...\") with open(os.path.join(root, f), \"r\") as 
configfile: for line in configfile: author", "= [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\")", "= None self._firmware_version = None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script =", "it...\") with open(os.path.join(root, f), \"r\") as configfile: for line in configfile: author =", "def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y,", "line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author) break", "= settings self._printer = printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier =", "+\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\" % self._firmware_author)", "self._flash_status = None def _background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start() return", "self._identifier = identifier self._logger = logger self._firmware = None self._firmware_version = None self._firmware_author", "post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\") time.sleep(self._settings.get_post_flash_delay()) def", "result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for root, dirs,", "self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function 
call.\") def upload(self): self._logger.debug(\"Firmware", "temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors", "return None, errors result = self._handle_firmware_file(temp_path) self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result", "self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path) self._push_firmware_info() return result def download(self): self._logger.debug(\"Downloading", "BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier, logger): self._settings = settings self._printer", "os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function call.\") def _find_firmware_info(self): for", "call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported", "None self._firmware_author = None self._firmware_upload_time = None self._should_run_post_script = False self._flash_status = None", "_run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands", "re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0] self._logger.debug(\"Found STRING_CONFIG_H_AUTHOR : %s\"", "from threading import Thread class BaseFlasher: def __init__(self, settings, printer, plugin, plugin_manager, identifier,", "= [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self):", "if version: self._firmware_version = version[0] 
self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif", "thread.start() return thread def _run_pre_flash_script(self): pre_flash_script = self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode", ")) def _push_flash_status(self, event_name): if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status) self._plugin_manager.send_plugin_message(self._identifier,", "errors = self._validate_firmware_file(temp_path) if errors: self._push_firmware_info() os.remove(temp_path) return None, errors result = self._handle_firmware_file(temp_path)", "if self._flash_status: data = dict( type=event_name ) data.update(self._flash_status) self._plugin_manager.send_plugin_message(self._identifier, data) def send_initial_state(self): self._push_firmware_info()", "self._push_firmware_info() self._logger.debug(\"Clearing downloaded firmware...\") os.remove(temp_path) return result def _handle_firmware_file(self, firmware_file_path): raise FlasherError(\"Unsupported function", "root, dirs, files in os.walk(self._firmware): for f in files: if f == \"Version.h\":", "with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path) if errors:", "None self._should_run_post_script = False self._flash_status = None def _background_run(self, target, args=None): thread =", "websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version, author=self._firmware_author, upload_time=self._firmware_upload_time.strftime(\"%d/%m/%Y, %H:%M:%S\") if self._firmware_upload_time is not None", "def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors", "\"r\") as configfile: for line in configfile: author = 
re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line)", "function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name(), version=self._firmware_version,", "import tempfile import os import re from threading import Thread class BaseFlasher: def", "version: self._firmware_version = version[0] self._logger.debug(\"Found SHORT_BUILD_VERSION : %s\" % self._firmware_version) break elif f", "for line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash", "downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path = temp.name errors = self._validate_firmware_file(temp_path)", "\"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as version_file: for line", "= identifier self._logger = logger self._firmware = None self._firmware_version = None self._firmware_author =", "%H:%M:%S\") if self._firmware_upload_time is not None else None, firmware=self._firmware )) def _push_flash_status(self, event_name):", "GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else:", "= self._settings.get_pre_flash_script() if pre_flash_script: self._logger.debug(\"Running pre-flash GCode script :\") self._logger.debug(pre_flash_script) commands = [line.strip()", "self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip() for line in post_flash_script.splitlines()] self._printer.commands(commands) else:", "pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting pre-flash delay...\") 
time.sleep(self._settings.get_pre_flash_delay()) def _run_post_flash_script(self): post_flash_script", "== \"Version.h\": self._logger.debug(\"Found Version.h, opening it...\") with open(os.path.join(root, f), \"r\") as version_file: for", "Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as configfile: for line in configfile:", "open(os.path.join(root, f), \"r\") as version_file: for line in version_file: version = re.findall(r'#define +SHORT_BUILD_VERSION", "files in os.walk(self._firmware): for f in files: if f == \"Version.h\": self._logger.debug(\"Found Version.h,", "None def _background_run(self, target, args=None): thread = Thread(target=target, args=args) thread.start() return thread def", "line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash GCode script defined\") def _wait_pre_flash_delay(self): self._logger.debug(\"Waiting", "elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\") with open(os.path.join(root, f), \"r\") as", "in configfile: author = re.findall(r'#define +STRING_CONFIG_H_AUTHOR +\"([^\"]*)\"', line) if author: self._firmware_author = author[0]", "FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\"", ":\") self._logger.debug(pre_flash_script) commands = [line.strip() for line in pre_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No pre-flash", "printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier = identifier self._logger = logger", "settings self._printer = printer self._plugin = plugin self._plugin_manager = plugin_manager self._identifier = identifier", "self._should_run_post_script = False def check_setup_errors(self): raise FlasherError(\"Unsupported function call.\") def upload(self): self._logger.debug(\"Firmware 
uploaded", "self._settings.get_upload_path_suffix()] errors = self._validate_firmware_file(uploaded_file_path) if errors: self._push_firmware_info() return None, errors result = self._handle_firmware_file(uploaded_file_path)", "_run_post_flash_script(self): post_flash_script = self._settings.get_post_flash_script() if post_flash_script: self._logger.debug(\"Running post-flash script\") self._logger.debug(post_flash_script) commands = [line.strip()", ": %s\" % self._firmware_version) break elif f == \"Configuration.h\": self._logger.debug(\"Found Configuration.h, opening it...\")", "import os import re from threading import Thread class BaseFlasher: def __init__(self, settings,", "line in post_flash_script.splitlines()] self._printer.commands(commands) else: self._logger.debug(\"No script defined\") def _wait_post_flash_delay(self): self._logger.debug(\"Waiting post-flash delay...\")", "r = requests.get(flask.request.values[\"url\"]) self._logger.debug(\"Saving downloaded firmware...\") with tempfile.NamedTemporaryFile(delete=False) as temp: temp.write(r.content) temp_path =", "file_path): raise FlasherError(\"Unsupported function call.\") def handle_connected_event(self): if self._should_run_post_script: self._run_post_flash_script() self._should_run_post_script = False", "upload(self): self._logger.debug(\"Firmware uploaded by the user\") uploaded_file_path = flask.request.values[\"firmware_file.\" + self._settings.get_upload_path_suffix()] errors =", "FlasherError(\"Undefined function call\") def _push_firmware_info(self): self._logger.debug(\"Sending firmware info through websocket\") self._plugin_manager.send_plugin_message(self._identifier, dict( type=self._firmware_info_event_name()," ]
[ "OpenGLCffi.GLX import params @params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer']) def glXAssociateDMPbufferSGIX(dpy, pbuffer, params, dmbuffer):", "from OpenGLCffi.GLX import params @params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer']) def glXAssociateDMPbufferSGIX(dpy, pbuffer, params,", "import params @params(api='glx', prms=['dpy', 'pbuffer', 'params', 'dmbuffer']) def glXAssociateDMPbufferSGIX(dpy, pbuffer, params, dmbuffer): pass" ]
[ "this work for additional information regarding copyright # ownership. All rights reserved. This", "See the NOTICE file distributed with this work for additional information regarding copyright", "FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of", "This program and the accompanying materials are made available # under the terms", "java_ref=None): if java_ref is None: if type_name is None and field_name is None:", "http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type", "import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a temporal field.", "from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a", "program and the accompanying materials are made available # under the terms of", "import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range", "def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None: if type_name is None", "type_name=None, field_name=None, java_ref=None): if java_ref is None: if type_name is None and field_name", "# =============================================================================================== from pygw.config import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import", "available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from ..statistic import FieldStatistic", "All rights reserved. 
This program and the accompanying materials are made available #", "is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from ..statistic", "terms of the Apache License, Version 2.0 which accompanies this distribution and is", "Version 2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt #", "(c) 2013-2022 Contributors to the Eclipse Foundation # # See the NOTICE file", "NOTICE file distributed with this work for additional information regarding copyright # ownership.", "Tracks the time range of a temporal field. \"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def", "type_name is None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref =", "information regarding copyright # ownership. All rights reserved. 
This program and the accompanying", "FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks", "are made available # under the terms of the Apache License, Version 2.0", "Copyright (c) 2013-2022 Contributors to the Eclipse Foundation # # See the NOTICE", "..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic):", "# See the NOTICE file distributed with this work for additional information regarding", "if type_name is None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref", "field_name=None, java_ref=None): if java_ref is None: if type_name is None and field_name is", "import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\"", "accompanying materials are made available # under the terms of the Apache License,", "from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the", "...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a temporal", "2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # ===============================================================================================", "copyright # ownership. All rights reserved. This program and the accompanying materials are", "License, Version 2.0 which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt", "temporal field. 
\"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref", "available # under the terms of the Apache License, Version 2.0 which accompanies", "__init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None: if type_name is None and", "file distributed with this work for additional information regarding copyright # ownership. All", "reserved. This program and the accompanying materials are made available # under the", "under the terms of the Apache License, Version 2.0 which accompanies this distribution", "Apache License, Version 2.0 which accompanies this distribution and is # available at", "the Eclipse Foundation # # See the NOTICE file distributed with this work", "and the accompanying materials are made available # under the terms of the", "pygw.config import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval", "\"\"\" Tracks the time range of a temporal field. \"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE)", "java_ref is None: if type_name is None and field_name is None: java_ref =", "the time range of a temporal field. \"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self,", "STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None: if", "additional information regarding copyright # ownership. All rights reserved. This program and the", "time range of a temporal field. 
\"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None,", "2013-2022 Contributors to the Eclipse Foundation # # See the NOTICE file distributed", "which accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from", "FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None: if type_name is", "IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a temporal field. \"\"\"", "this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import", "to the Eclipse Foundation # # See the NOTICE file distributed with this", "for additional information regarding copyright # ownership. All rights reserved. This program and", "from pygw.config import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from", "ownership. All rights reserved. This program and the accompanying materials are made available", "# ownership. All rights reserved. This program and the accompanying materials are made", "the Apache License, Version 2.0 which accompanies this distribution and is # available", "of a temporal field. 
\"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None):", "# available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from ..statistic import", "a temporal field. \"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if", "geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer", "work for additional information regarding copyright # ownership. All rights reserved. This program", "# under the terms of the Apache License, Version 2.0 which accompanies this", "Contributors to the Eclipse Foundation # # See the NOTICE file distributed with", "materials are made available # under the terms of the Apache License, Version", "the terms of the Apache License, Version 2.0 which accompanies this distribution and", "of the Apache License, Version 2.0 which accompanies this distribution and is #", "regarding copyright # ownership. All rights reserved. This program and the accompanying materials", "# # Copyright (c) 2013-2022 Contributors to the Eclipse Foundation # # See", "range of a temporal field. 
\"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None,", "None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name, field_name)", "made available # under the terms of the Apache License, Version 2.0 which", "import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import", "is None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name,", "if java_ref is None: if type_name is None and field_name is None: java_ref", "the NOTICE file distributed with this work for additional information regarding copyright #", "None: if type_name is None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else:", "at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from ..statistic import FieldStatistic from", "=============================================================================================== from pygw.config import geowave_pkg from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType", "= FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None: if type_name", "and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name, field_name) super().__init__(java_ref,", 
"TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a temporal field. \"\"\" STATS_TYPE =", "Eclipse Foundation # # See the NOTICE file distributed with this work for", "distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg", "accompanies this distribution and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config", "with this work for additional information regarding copyright # ownership. All rights reserved.", "distributed with this work for additional information regarding copyright # ownership. All rights", "field. \"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is", "is None: if type_name is None and field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic()", "..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time", "rights reserved. 
This program and the accompanying materials are made available # under", "\"\"\" STATS_TYPE = FieldStatisticType(geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic.STATS_TYPE) def __init__(self, type_name=None, field_name=None, java_ref=None): if java_ref is None:", "# # See the NOTICE file distributed with this work for additional information", "# Copyright (c) 2013-2022 Contributors to the Eclipse Foundation # # See the", "and is # available at http://www.apache.org/licenses/LICENSE-2.0.txt # =============================================================================================== from pygw.config import geowave_pkg from", "the accompanying materials are made available # under the terms of the Apache", "from ..statistic import FieldStatistic from ..statistic_type import FieldStatisticType from ...base.interval import IntervalTransformer class", "class TimeRangeStatistic(FieldStatistic): \"\"\" Tracks the time range of a temporal field. \"\"\" STATS_TYPE", "field_name is None: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic() else: java_ref = geowave_pkg.core.geotime.store.statistics.TimeRangeStatistic(type_name, field_name) super().__init__(java_ref, IntervalTransformer())", "Foundation # # See the NOTICE file distributed with this work for additional" ]
[ "This empty file allows us to # use app/ as a python package", "# This empty file allows us to # use app/ as a python" ]
[ "= 0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >=", "startIndex else: commentLength = endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex", "+ self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self,", "== -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength = endIndex -", "= Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types format data_types_patterns = parcher.c_parcher()", "QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu:", "in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while index >= 0: length", "+ length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text,", "color == \"green\": return Qt.green if color == \"yellow\": return Qt.yellow if color", "-----------------> single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) #", "startIndex = 0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex", "#white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), 
self.keyword_format)) # -----------------> single line comment", "def highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index =", "index = expression.indexIn(text) while index >= 0: length = expression.matchedLength() self.setFormat(index, length, format_)", "startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if color == \"red\":", "= self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if color == \"red\": return", "endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) -", "return Qt.blue if color == \"green\": return Qt.green if color == \"yellow\": return", "commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if color", "Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class", "self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in", "import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher", "return Qt.red if color == \"blue\": return Qt.blue if color == \"green\": return", "\"green\": return Qt.green if color == \"yellow\": return Qt.yellow if color == \"white\":", "# -----------------> single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format))", "self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, 
text): for pattern,", "single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format", "python from PyQt5.QtCore import QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat,", "data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format)", "keyword_patterns = parcher.c_parcher() # -----------------> data types format data_types_patterns = parcher.c_parcher() self.data_types_format =", "give_color(self, color): if color == \"red\": return Qt.red if color == \"blue\": return", "keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern),", "self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data", "in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format single_comment_format = QTextCharFormat()", "-----------------> single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart", 
"self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"]))", "text): for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while", "if color == \"white\": return Qt.white if color == \"magenta\": return Qt.magenta if", "self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if color ==", "self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression =", "super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # ----------------->", "types format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules =", "= QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format =", "# -----------------> multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single", "if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex", "= parcher.c_parcher() # -----------------> data 
types format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat()", "color == \"white\": return Qt.white if color == \"magenta\": return Qt.magenta if color", "QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format = QTextCharFormat()", "QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None):", "expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1: startIndex", "string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd", "multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string", "self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in", "== \"red\": return Qt.red if color == \"blue\": return Qt.blue if color ==", "self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier)", "startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1: 
self.setCurrentBlockState(1) commentLength", "str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for", "func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold)", "self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) while", "self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else:", "self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while index >= 0: length =", "Qt.blue if color == \"green\": return Qt.green if color == \"yellow\": return Qt.yellow", "return Qt.magenta if color == \"cyan\": return Qt.cyan def _reinit__(self): self.__init__(parent = self.parent)", "#!/usr/bin/env python from PyQt5.QtCore import QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter,", "Qt.red if color == \"blue\": return Qt.blue if color == \"green\": return Qt.green", "QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single", "pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format single_comment_format =", "\"magenta\": return Qt.magenta if color == 
\"cyan\": return Qt.cyan def _reinit__(self): self.__init__(parent =", "PyQt5.QtCore import QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from", "\"blue\": return Qt.blue if color == \"green\": return Qt.green if color == \"yellow\":", "parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types format data_types_patterns", "= QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern)", "1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex)", "if color == \"green\": return Qt.green if color == \"yellow\": return Qt.yellow if", "-----------------> data types format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold)", "line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\")", "def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns =", "parcher.c_parcher() # -----------------> data types format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"]))", "format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format str_format", "str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) 
self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text):", "= expression.indexIn(text) while index >= 0: length = expression.matchedLength() self.setFormat(index, length, format_) index", "C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns", "startIndex + commentLength); def give_color(self, color): if color == \"red\": return Qt.red if", "QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent)", "Qt.green if color == \"yellow\": return Qt.yellow if color == \"white\": return Qt.white", "= [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format", "line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline", "# -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # ----------------->", "= parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types format", "str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_", "[(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] 
#print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format", "self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color):", "parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern", "format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd =", "if color == \"magenta\": return Qt.magenta if color == \"cyan\": return Qt.cyan def", "QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index", "= self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex", "self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) #", "!= 1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text,", "format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern),", "else: commentLength = endIndex 
- startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex =", "keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100))", "while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1)", "QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent", "while index >= 0: length = expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text,", "= endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex", "-----------------> multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line", "func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white", "length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex)", ">= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength =", "return Qt.white if color == \"magenta\": return Qt.magenta if color == \"cyan\": return", "in data_types_patterns[\"data_types\"][\"words\"]] 
#print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold)", "self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function", "self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line", "= self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex", "<filename>src/brusher/c_highlighter.py #!/usr/bin/env python from PyQt5.QtCore import QFile, QRegExp, Qt from PyQt5.QtGui import QFont,", "= len(text) - startIndex else: commentLength = endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex,", "# -----------------> data types format data_types_patterns = parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow", "single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # ----------------->", "single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format = QTextCharFormat() 
self.multi_comment_format.setForeground(Qt.cyan)", "- startIndex else: commentLength = endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format)", "\"white\": return Qt.white if color == \"magenta\": return Qt.magenta if color == \"cyan\":", "self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength = endIndex - startIndex +", "startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def", "from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter):", "index >= 0: length = expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index", "self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"),", "0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text)", "if color == \"yellow\": return Qt.yellow if color == \"white\": return Qt.white if", "format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format", "== \"yellow\": return Qt.yellow if color == \"white\": return Qt.white if color ==", "self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat() 
self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for", "self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format =", "QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self,", "color == \"red\": return Qt.red if color == \"blue\": return Qt.blue if color", "-----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword", "class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher()", "self.setFormat(index, length, format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0", "Qt.yellow if color == \"white\": return Qt.white if color == \"magenta\": return Qt.magenta", "PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def", "func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern", "expression.indexIn(text) while index >= 0: length = expression.matchedLength() self.setFormat(index, length, format_) index =", "syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def 
__init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent", "format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format", "parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() #", "= parcher.c_parcher() self.data_types_format = QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for", "import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self,", "comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format", "__init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher()", "format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState()", "index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() !=", "startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if", "single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart =", "= QRegExp(\"/\\\\*\") 
self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in self.highlight_rules:", "QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"]))", "QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression", "for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat()", "comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment", "data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"),", "= QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format = QTextCharFormat()", "highlightBlock(self, text): for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text)", "if color == \"red\": return Qt.red if color == \"blue\": return Qt.blue if", "\"red\": return Qt.red if color == \"blue\": return 
Qt.blue if color == \"green\":", "single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"), single_comment_format)) # -----------------> multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier)", "= expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1:", "\",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) #", "from PyQt5.QtCore import QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor", "Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent = parent parcher =", "for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format single_comment_format", "endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex +", "# -----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in", "-1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength = endIndex - startIndex", "== \"magenta\": return Qt.magenta if color == \"cyan\": return Qt.cyan def _reinit__(self): self.__init__(parent", "- startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, 
commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength);", "Qt.white if color == \"magenta\": return Qt.magenta if color == \"cyan\": return Qt.cyan", "if color == \"blue\": return Qt.blue if color == \"green\": return Qt.green if", "self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format str_format =", "QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher", "0: length = expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index + length)", "str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\")", "length, format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex = 0 if", "def give_color(self, color): if color == \"red\": return Qt.red if color == \"blue\":", "return Qt.green if color == \"yellow\": return Qt.yellow if color == \"white\": return", "self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # ----------------->", "function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format", "\"yellow\": return Qt.yellow if color == \"white\": return Qt.white if color == \"magenta\":", "== \"white\": return Qt.white if color == 
\"magenta\": return Qt.magenta if color ==", "parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types format data_types_patterns =", "color == \"magenta\": return Qt.magenta if color == \"cyan\": return Qt.cyan def _reinit__(self):", "-----------------> keyword format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]:", "self.keyword_format)) # -----------------> single line comment format single_comment_format = QTextCharFormat() single_comment_format.setForeground(QColor(0,0,0,100)) single_comment_format.setFontWeight(QFont.Courier) self.highlight_rules.append((QRegExp(\"//[^\\n]*\"),", "index + length) self.setCurrentBlockState(0) startIndex = 0 if self.previousBlockState() != 1: startIndex =", "== \"green\": return Qt.green if color == \"yellow\": return Qt.yellow if color ==", "commentLength = endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text,", "self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # -----------------> single line comment format", "# -----------------> single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format))", ">= 0: length = expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index +", "startIndex) if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength", "data types format data_types_patterns = parcher.c_parcher() self.data_types_format 
= QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules", "QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import", "commentLength = len(text) - startIndex else: commentLength = endIndex - startIndex + self.commentEnd.matchedLength()", "#print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format))", "expression = QRegExp(pattern) index = expression.indexIn(text) while index >= 0: length = expression.matchedLength()", "if endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength =", "self.parent = parent parcher = Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types", "== \"blue\": return Qt.blue if color == \"green\": return Qt.green if color ==", "#yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) #", "self.commentStart.indexIn(text, startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex ==", "pattern in data_types_patterns[\"data_types\"][\"words\"]] #print(\"data_types_uzunlugu: \",len(data_types_patterns[\"data_types\"][\"words\"])) # -----------------> function format func_format = QTextCharFormat() func_format.setForeground(Qt.red)", "self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> 
single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold)", "self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) #", "= QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format str_format = QTextCharFormat()", "QSyntaxHighlighter, QTextCharFormat, QColor from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter,", "from syntaxer import Syntax_Patcher class C_Highlighter(QSyntaxHighlighter): def __init__(self, parent=None): super(C_Highlighter, self).__init__(parent) self.parent =", "= QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format)) # ----------------->", "pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while index >=", "= QTextCharFormat() self.data_types_format.setForeground(self.give_color(data_types_patterns[\"data_types\"][\"color\"])) #yellow self.data_types_format.setFontWeight(QFont.ExtraBold) self.highlight_rules = [(QRegExp(pattern), self.data_types_format) for pattern in data_types_patterns[\"data_types\"][\"words\"]]", "QRegExp(pattern) index = expression.indexIn(text) while index >= 0: length = expression.matchedLength() self.setFormat(index, length,", "self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) 
while startIndex >= 0: endIndex =", "length = expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0)", "endIndex == -1: self.setCurrentBlockState(1) commentLength = len(text) - startIndex else: commentLength = endIndex", "return Qt.yellow if color == \"white\": return Qt.white if color == \"magenta\": return", "color == \"blue\": return Qt.blue if color == \"green\": return Qt.green if color", "for pattern, format_ in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while index", "expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex =", "commentLength); def give_color(self, color): if color == \"red\": return Qt.red if color ==", "color): if color == \"red\": return Qt.red if color == \"blue\": return Qt.blue", "startIndex) while startIndex >= 0: endIndex = self.commentEnd.indexIn(text, startIndex) if endIndex == -1:", "single_comment_format)) # -----------------> multiline comment format self.multi_comment_format = QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # ----------------->", "Syntax_Patcher() keyword_patterns = parcher.c_parcher() # -----------------> data types format data_types_patterns = parcher.c_parcher() self.data_types_format", "format self.keyword_format = QTextCharFormat() self.keyword_format.setForeground(self.give_color(keyword_patterns[\"keywords\"][\"color\"])) #white self.keyword_format.setFontWeight(QFont.Bold) for pattern in keyword_patterns[\"keywords\"][\"words\"]: self.highlight_rules.append((QRegExp(pattern), self.keyword_format))", "format_ in self.highlight_rules: expression = QRegExp(pattern) index = expression.indexIn(text) while index >= 0:", "import QFile, QRegExp, Qt from PyQt5.QtGui import QFont, QSyntaxHighlighter, 
QTextCharFormat, QColor from syntaxer", "func_format = QTextCharFormat() func_format.setForeground(Qt.red) func_format.setFontWeight(QFont.Bold) self.highlight_rules.append((QRegExp(\"\\\\b[A-Za-z0-9_]+(?=\\\\()\"), func_format)) # -----------------> keyword format self.keyword_format =", "= QTextCharFormat() str_format.setForeground(Qt.magenta) str_format.setFontWeight(QFont.DemiBold) self.highlight_rules.append((QRegExp(\"\\\".*\\\"\"), str_format)) self.commentStart = QRegExp(\"/\\\\*\") self.commentEnd = QRegExp(\"\\\\*/\") def", "0 if self.previousBlockState() != 1: startIndex = self.commentStart.indexIn(text, startIndex) while startIndex >= 0:", "QTextCharFormat() self.multi_comment_format.setForeground(Qt.cyan) self.multi_comment_format.setFontWeight(QFont.Courier) # -----------------> single line string format str_format = QTextCharFormat() str_format.setForeground(Qt.magenta)", "= QRegExp(pattern) index = expression.indexIn(text) while index >= 0: length = expression.matchedLength() self.setFormat(index,", "= expression.matchedLength() self.setFormat(index, length, format_) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) startIndex", "self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if color == \"red\": return Qt.red", "+ commentLength); def give_color(self, color): if color == \"red\": return Qt.red if color", "self.setFormat(startIndex, commentLength, self.multi_comment_format) startIndex = self.commentStart.indexIn(text, startIndex + commentLength); def give_color(self, color): if", "color == \"yellow\": return Qt.yellow if color == \"white\": return Qt.white if color", "len(text) - startIndex else: commentLength = endIndex - startIndex + self.commentEnd.matchedLength() self.setFormat(startIndex, commentLength," ]
[ "Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick", "Copyright (C) 2019 Artech House (<EMAIL>) This file is part of Introduction to", "for M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n", "Show the form app.exec_() # Execute the app if __name__ == '__main__': main()", "= -0.02 elif target_type == 'Swerling 4': alpha = 0.873 beta = -0.27", "Introduction to Radar Using Python and MATLAB and can not be copied and/or", "tick label size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update", "of Introduction to Radar Using Python and MATLAB and can not be copied", "changes an input value. :return: \"\"\" # Get the parameters from the form", "'Swerling 1': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 2':", "House (<EMAIL>) This file is part of Introduction to Radar Using Python and", "from the form target_type = self.target_type.currentText() if target_type == 'Swerling 0': alpha =", "== 'Swerling 0': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling", "number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n ** alpha) for n in", "from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets import QApplication,", "alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 2': alpha =", "elif target_type == 'Swerling 4': alpha = 0.873 beta = -0.27 # Calculate", "for the updated plot self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '') #", "plot title and labels self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses',", "self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas to the vertical", "the tick label size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) #", "axes for the updated plot 
self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '')", "<NAME> On: 10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This", "the first display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when the user", "Using Python and MATLAB and can not be copied and/or distributed without the", "value. :return: \"\"\" # Get the parameters from the form number_of_pulses = int(self.number_of_pulses.text())", "__init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input boxes, when the user", "beta * n ** alpha) for n in np] # Clear the axes", "selected target type from the form target_type = self.target_type.currentText() if target_type == 'Swerling", "import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets", "import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure", "M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n **", "== 'Swerling 1': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling", "sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets import", "file is part of Introduction to Radar Using Python and MATLAB and can", "target_type = self.target_type.currentText() if target_type == 'Swerling 0': alpha = 0.8 beta =", "updated plot self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '') # Set the", "form = OptimumBinary() # Set the form form.show() # Show the form app.exec_()", "Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat", "QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from", "import 
(FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def", "== 'Swerling 2': alpha = 0.91 beta = -0.38 elif target_type == 'Swerling", "instance of QApplication form = OptimumBinary() # Set the form form.show() # Show", "results self.axes1.plot(np, m_optimum, '') # Set the plot title and labels self.axes1.set_title('Optimum M", "alpha = 0.91 beta = -0.38 elif target_type == 'Swerling 3': alpha =", "from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow,", "self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the plotting canvas fig =", "'Swerling 4': alpha = 0.873 beta = -0.27 # Calculate the optimum choice", "0.873 beta = -0.27 # Calculate the optimum choice for M np =", "alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 1': alpha =", "Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick label size self.axes1.tick_params(labelsize=12) # Turn", "def start(): form = OptimumBinary() # Set the form form.show() # Show the", "numpy import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore", "-0.02 elif target_type == 'Swerling 4': alpha = 0.873 beta = -0.27 #", "form target_type = self.target_type.currentText() if target_type == 'Swerling 0': alpha = 0.8 beta", "== 'Swerling 4': alpha = 0.873 beta = -0.27 # Calculate the optimum", "m_optimum, '') # Set the plot title and labels self.axes1.set_title('Optimum M for Binary", "is part of Introduction to Radar Using Python and MATLAB and can not", "from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) #", "copied and/or 
distributed without the express permission of Artech House. \"\"\" import sys", "as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__()", "canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the", "Get the selected target type from the form target_type = self.target_type.currentText() if target_type", "Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow", "# Set the form form.show() # Show the form app.exec_() # Execute the", "Update the canvas self.my_canvas.draw() def start(): form = OptimumBinary() # Set the form", "class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input", "== 'Swerling 3': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling", "Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas to the", "elif target_type == 'Swerling 1': alpha = 0.8 beta = -0.02 elif target_type", "the user changes an input value. 
:return: \"\"\" # Get the parameters from", "# A new instance of QApplication form = OptimumBinary() # Set the form", "linewidth=0.5) # Update the canvas self.my_canvas.draw() def start(): form = OptimumBinary() # Set", "= 0.91 beta = -0.38 elif target_type == 'Swerling 3': alpha = 0.8", "** alpha) for n in np] # Clear the axes for the updated", "to Radar Using Python and MATLAB and can not be copied and/or distributed", "elif target_type == 'Swerling 2': alpha = 0.91 beta = -0.38 elif target_type", "form form.show() # Show the form app.exec_() # Execute the app if __name__", "self)) # Update the canvas for the first display self._update_canvas() def _update_canvas(self): \"\"\"", "np] # Clear the axes for the updated plot self.axes1.clear() # Display the", "self).__init__() self.setupUi(self) # Connect to the input boxes, when the user presses enter", "beta = -0.02 elif target_type == 'Swerling 1': alpha = 0.8 beta =", "input value. :return: \"\"\" # Get the parameters from the form number_of_pulses =", "# Show the form def main(): app = QApplication(sys.argv) # A new instance", "Connect to the input boxes, when the user presses enter the form updates", "# Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) #", "by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>)", "Update the canvas for the first display self._update_canvas() def _update_canvas(self): \"\"\" Update the", "form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target type from the form", "from numpy import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import", "for n in np] # Clear the axes for the updated plot self.axes1.clear()", "the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) 
self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the plotting", "with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This file is part of", "the canvas for the first display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure", "form def main(): app = QApplication(sys.argv) # A new instance of QApplication form", "app = QApplication(sys.argv) # A new instance of QApplication form = OptimumBinary() #", "new instance of QApplication form = OptimumBinary() # Set the form form.show() #", "form.show() # Show the form app.exec_() # Execute the app if __name__ ==", "the plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) #", "Radar Using Python and MATLAB and can not be copied and/or distributed without", "the form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target type from the", "np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n ** alpha)", "to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for", "plot self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '') # Set the plot", "size=12) self.axes1.set_ylabel('M', size=12) # Set the tick label size self.axes1.tick_params(labelsize=12) # Turn on", "4': alpha = 0.873 beta = -0.27 # Calculate the optimum choice for", "fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas", "plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add", "Set the plot title and labels self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number", "int(self.number_of_pulses.text()) # Get the selected target type from the form target_type = self.target_type.currentText()", "input boxes, when the user presses enter the form 
updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) #", "= -0.02 elif target_type == 'Swerling 2': alpha = 0.91 beta = -0.38", "the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the", "beta = -0.02 elif target_type == 'Swerling 4': alpha = 0.873 beta =", "self.setupUi(self) # Connect to the input boxes, when the user presses enter the", "when the user changes an input value. :return: \"\"\" # Get the parameters", "= -0.38 elif target_type == 'Swerling 3': alpha = 0.8 beta = -0.02", "the form target_type = self.target_type.currentText() if target_type == 'Swerling 0': alpha = 0.8", "m_optimum = [ceil(10.0 ** beta * n ** alpha) for n in np]", "MATLAB and can not be copied and/or distributed without the express permission of", "Clear the axes for the updated plot self.axes1.clear() # Display the results self.axes1.plot(np,", "\"\"\" Update the figure when the user changes an input value. 
:return: \"\"\"", "= 0.873 beta = -0.27 # Calculate the optimum choice for M np", "Display the results self.axes1.plot(np, m_optimum, '') # Set the plot title and labels", "ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import", "def _update_canvas(self): \"\"\" Update the figure when the user changes an input value.", "# Clear the axes for the updated plot self.axes1.clear() # Display the results", "alpha = 0.873 beta = -0.27 # Calculate the optimum choice for M", "PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT", "the plot title and labels self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number of", "= [ceil(10.0 ** beta * n ** alpha) for n in np] #", "# Set up a figure for the plotting canvas fig = Figure() self.axes1", "import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as", "the updated plot self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '') # Set", "grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def start(): form = OptimumBinary()", "-0.38 elif target_type == 'Swerling 3': alpha = 0.8 beta = -0.02 elif", "import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from", "Set the form form.show() # Show the form app.exec_() # Execute the app", "matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import", "Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick 
label", "n ** alpha) for n in np] # Clear the axes for the", "# Get the parameters from the form number_of_pulses = int(self.number_of_pulses.text()) # Get the", "and MATLAB and can not be copied and/or distributed without the express permission", "part of Introduction to Radar Using Python and MATLAB and can not be", "express permission of Artech House. \"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from", "the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for the", "Show the form def main(): app = QApplication(sys.argv) # A new instance of", "File: optimum_binary_example.py Created by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright (C) 2019", "\"\"\" # Get the parameters from the form number_of_pulses = int(self.number_of_pulses.text()) # Get", "# Calculate the optimum choice for M np = arange(1, number_of_pulses+1) m_optimum =", "Calculate the optimum choice for M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0", "House. \"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil", "the results self.axes1.plot(np, m_optimum, '') # Set the plot title and labels self.axes1.set_title('Optimum", "= self.target_type.currentText() if target_type == 'Swerling 0': alpha = 0.8 beta = -0.02", "OptimumBinary() # Set the form form.show() # Show the form def main(): app", "optimum choice for M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta", "display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when the user changes an", "[ceil(10.0 ** beta * n ** alpha) for n in np] # Clear", "distributed without the express permission of Artech House. 
\"\"\" import sys from Chapter06.ui.OptimumBinary_ui", "presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for", "label size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the", "-0.02 elif target_type == 'Swerling 1': alpha = 0.8 beta = -0.02 elif", "enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the", "# Update the canvas for the first display self._update_canvas() def _update_canvas(self): \"\"\" Update", "# Set the plot title and labels self.axes1.set_title('Optimum M for Binary Integration', size=14)", "main(): app = QApplication(sys.argv) # A new instance of QApplication form = OptimumBinary()", "the form form.show() # Show the form def main(): app = QApplication(sys.argv) #", "figure for the plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas =", "from the form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target type from", "self.my_canvas = FigureCanvas(fig) # Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea,", "user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure", "(C) 2019 Artech House (<EMAIL>) This file is part of Introduction to Radar", "up a figure for the plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111)", "elif target_type == 'Swerling 3': alpha = 0.8 beta = -0.02 elif target_type", "QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class", "M for Binary 
Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set", "-0.27 # Calculate the optimum choice for M np = arange(1, number_of_pulses+1) m_optimum", "self.axes1.set_ylabel('M', size=12) # Set the tick label size self.axes1.tick_params(labelsize=12) # Turn on the", "Set the form form.show() # Show the form def main(): app = QApplication(sys.argv)", "first display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when the user changes", "for the plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig)", "Set up a figure for the plotting canvas fig = Figure() self.axes1 =", "Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def start():", "size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick label size", "= OptimumBinary() # Set the form form.show() # Show the form app.exec_() #", "alpha) for n in np] # Clear the axes for the updated plot", "the axes for the updated plot self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum,", "self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick label size self.axes1.tick_params(labelsize=12)", "size=12) # Set the tick label size self.axes1.tick_params(labelsize=12) # Turn on the grid", "Get the parameters from the form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected", "Artech House. \"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange,", "_update_canvas(self): \"\"\" Update the figure when the user changes an input value. 
:return:", "a figure for the plotting canvas fig = Figure() self.axes1 = fig.add_subplot(111) self.my_canvas", "canvas self.my_canvas.draw() def start(): form = OptimumBinary() # Set the form form.show() #", "super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input boxes, when the user presses", "alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 4': alpha =", "Project: RadarBook File: optimum_binary_example.py Created by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright", "optimum_binary_example.py Created by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech", "* n ** alpha) for n in np] # Clear the axes for", "to the input boxes, when the user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas)", "Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input boxes, when", "target_type == 'Swerling 4': alpha = 0.873 beta = -0.27 # Calculate the", "self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for the first display self._update_canvas() def", "10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This file is", "Created with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This file is part", "from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas,", "2019 Artech House (<EMAIL>) This file is part of Introduction to Radar Using", "form.show() # Show the form def main(): app = QApplication(sys.argv) # A new", "self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def start(): form = OptimumBinary() #", "size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas", "self.verticalLayout.addWidget(self.my_canvas) 
self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for the first display self._update_canvas()", "for the first display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when the", "the user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a", "of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the tick label size self.axes1.tick_params(labelsize=12) #", "form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the plotting canvas", "Python and MATLAB and can not be copied and/or distributed without the express", "target_type == 'Swerling 0': alpha = 0.8 beta = -0.02 elif target_type ==", "= fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas to the vertical layout", "the form form.show() # Show the form app.exec_() # Execute the app if", "self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the plotting canvas fig = Figure()", "and labels self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M',", "= 0.8 beta = -0.02 elif target_type == 'Swerling 2': alpha = 0.91", "target_type == 'Swerling 2': alpha = 0.91 beta = -0.38 elif target_type ==", "Artech House (<EMAIL>) This file is part of Introduction to Radar Using Python", "'') # Set the plot title and labels self.axes1.set_title('Optimum M for Binary Integration',", "of Artech House. 
\"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import", "# Connect to the input boxes, when the user presses enter the form", "self.target_type.currentText() if target_type == 'Swerling 0': alpha = 0.8 beta = -0.02 elif", "and/or distributed without the express permission of Artech House. \"\"\" import sys from", "= Figure() self.axes1 = fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas to", ":return: \"\"\" # Get the parameters from the form number_of_pulses = int(self.number_of_pulses.text()) #", "-0.02 elif target_type == 'Swerling 2': alpha = 0.91 beta = -0.38 elif", "updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up a figure for the plotting canvas fig", "when the user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set up", "self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) #", "NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self)", "on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def start(): form", "'Swerling 2': alpha = 0.91 beta = -0.38 elif target_type == 'Swerling 3':", "the selected target type from the form target_type = self.target_type.currentText() if target_type ==", "# Display the results self.axes1.plot(np, m_optimum, '') # Set the plot title and", "n in np] # Clear the axes for the updated plot self.axes1.clear() #", "# Show the form app.exec_() # Execute the app if __name__ == '__main__':", "an input value. 
:return: \"\"\" # Get the parameters from the form number_of_pulses", "(FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self):", "Created by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech House", "This file is part of Introduction to Radar Using Python and MATLAB and", "the figure when the user changes an input value. :return: \"\"\" # Get", "can not be copied and/or distributed without the express permission of Artech House.", "the express permission of Artech House. \"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow", "beta = -0.27 # Calculate the optimum choice for M np = arange(1,", "for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12) # Set the", "def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input boxes, when the", "# Update the canvas self.my_canvas.draw() def start(): form = OptimumBinary() # Set the", "of QApplication form = OptimumBinary() # Set the form form.show() # Show the", "FigureCanvas(fig) # Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self))", "= -0.02 elif target_type == 'Swerling 1': alpha = 0.8 beta = -0.02", "the canvas self.my_canvas.draw() def start(): form = OptimumBinary() # Set the form form.show()", "= QApplication(sys.argv) # A new instance of QApplication form = OptimumBinary() # Set", "NavigationToolbar(self.my_canvas, self)) # Update the canvas for the first display self._update_canvas() def _update_canvas(self):", "target_type == 'Swerling 1': alpha = 0.8 beta = -0.02 elif target_type ==", "figure when the user changes an input value. 
:return: \"\"\" # Get the", "start(): form = OptimumBinary() # Set the form form.show() # Show the form", "user changes an input value. :return: \"\"\" # Get the parameters from the", "RadarBook File: optimum_binary_example.py Created by: <NAME> On: 10/11/2018 Created with: PyCharm Copyright (C)", "= -0.27 # Calculate the optimum choice for M np = arange(1, number_of_pulses+1)", "Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update", "in np] # Clear the axes for the updated plot self.axes1.clear() # Display", "number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target type from the form target_type", "'Swerling 0': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 1':", "0': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 1': alpha", "2': alpha = 0.91 beta = -0.38 elif target_type == 'Swerling 3': alpha", "layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for the first display", "= 0.8 beta = -0.02 elif target_type == 'Swerling 4': alpha = 0.873", "matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect", "title and labels self.axes1.set_title('Optimum M for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12)", "QApplication form = OptimumBinary() # Set the form form.show() # Show the form", "and can not be copied and/or distributed without the express permission of Artech", "0.8 beta = -0.02 elif target_type == 'Swerling 1': alpha = 0.8 beta", "the optimum choice for M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 **", "PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This file is part of Introduction", "QApplication, QMainWindow from 
matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar)", "matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow):", "'Swerling 3': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 4':", "OptimumBinary() # Set the form form.show() # Show the form app.exec_() # Execute", "vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas for the first", "permission of Artech House. \"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy", "the parameters from the form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target", "# Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def", "import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to", "NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__,", "= FigureCanvas(fig) # Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas,", "canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas) self.addToolBar(QtCore.Qt.TopToolBarArea, NavigationToolbar(self.my_canvas, self)) # Update the canvas", "import Ui_MainWindow from numpy import arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from", "Update the figure when the user changes an input value. 
:return: \"\"\" #", "On: 10/11/2018 Created with: PyCharm Copyright (C) 2019 Artech House (<EMAIL>) This file", "arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n ** alpha) for n", "if target_type == 'Swerling 0': alpha = 0.8 beta = -0.02 elif target_type", "# Set the form form.show() # Show the form def main(): app =", "form = OptimumBinary() # Set the form form.show() # Show the form def", "QApplication(sys.argv) # A new instance of QApplication form = OptimumBinary() # Set the", "target_type == 'Swerling 3': alpha = 0.8 beta = -0.02 elif target_type ==", "the input boxes, when the user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas)", "3': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 4': alpha", "1': alpha = 0.8 beta = -0.02 elif target_type == 'Swerling 2': alpha", "self.axes1.plot(np, m_optimum, '') # Set the plot title and labels self.axes1.set_title('Optimum M for", "arange, ceil from PyQt5.QtWidgets import QApplication, QMainWindow from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg", "Figure class OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the", "\"\"\" Project: RadarBook File: optimum_binary_example.py Created by: <NAME> On: 10/11/2018 Created with: PyCharm", "0.8 beta = -0.02 elif target_type == 'Swerling 4': alpha = 0.873 beta", "beta = -0.02 elif target_type == 'Swerling 2': alpha = 0.91 beta =", "self.my_canvas.draw() def start(): form = OptimumBinary() # Set the form form.show() # Show", "(<EMAIL>) This file is part of Introduction to Radar Using Python and MATLAB", "= OptimumBinary() # Set the form form.show() # Show the form def main():", "canvas for the first display self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when", "labels self.axes1.set_title('Optimum M 
for Binary Integration', size=14) self.axes1.set_xlabel('Number of Pulses', size=12) self.axes1.set_ylabel('M', size=12)", "self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw()", "0.8 beta = -0.02 elif target_type == 'Swerling 2': alpha = 0.91 beta", "# Set the tick label size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':',", "OptimumBinary(QMainWindow, Ui_MainWindow): def __init__(self): super(self.__class__, self).__init__() self.setupUi(self) # Connect to the input boxes,", "the form def main(): app = QApplication(sys.argv) # A new instance of QApplication", "form form.show() # Show the form def main(): app = QApplication(sys.argv) # A", "parameters from the form number_of_pulses = int(self.number_of_pulses.text()) # Get the selected target type", "# Get the selected target type from the form target_type = self.target_type.currentText() if", "not be copied and/or distributed without the express permission of Artech House. 
\"\"\"", "\"\"\" import sys from Chapter06.ui.OptimumBinary_ui import Ui_MainWindow from numpy import arange, ceil from", "fig.add_subplot(111) self.my_canvas = FigureCanvas(fig) # Add the canvas to the vertical layout self.verticalLayout.addWidget(self.my_canvas)", "= 0.8 beta = -0.02 elif target_type == 'Swerling 1': alpha = 0.8", "target type from the form target_type = self.target_type.currentText() if target_type == 'Swerling 0':", "choice for M np = arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta *", "self.axes1.clear() # Display the results self.axes1.plot(np, m_optimum, '') # Set the plot title", "beta = -0.38 elif target_type == 'Swerling 3': alpha = 0.8 beta =", "= arange(1, number_of_pulses+1) m_optimum = [ceil(10.0 ** beta * n ** alpha) for", "from matplotlib.backends.qt_compat import QtCore from matplotlib.backends.backend_qt5agg import (FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure", "be copied and/or distributed without the express permission of Artech House. \"\"\" import", "type from the form target_type = self.target_type.currentText() if target_type == 'Swerling 0': alpha", "boxes, when the user presses enter the form updates self.number_of_pulses.returnPressed.connect(self._update_canvas) self.target_type.currentIndexChanged.connect(self._update_canvas) # Set", "= int(self.number_of_pulses.text()) # Get the selected target type from the form target_type =", "the grid self.axes1.grid(linestyle=':', linewidth=0.5) # Update the canvas self.my_canvas.draw() def start(): form =", "0.91 beta = -0.38 elif target_type == 'Swerling 3': alpha = 0.8 beta", "Set the tick label size self.axes1.tick_params(labelsize=12) # Turn on the grid self.axes1.grid(linestyle=':', linewidth=0.5)", "def main(): app = QApplication(sys.argv) # A new instance of QApplication form =", "without the express permission of Artech House. 
\"\"\" import sys from Chapter06.ui.OptimumBinary_ui import", "self._update_canvas() def _update_canvas(self): \"\"\" Update the figure when the user changes an input", "A new instance of QApplication form = OptimumBinary() # Set the form form.show()", "** beta * n ** alpha) for n in np] # Clear the" ]
[ "request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert request.called assert result is expected", "import httpx import pytest import respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect,", "False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", )", "\"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False),", "test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\",", "respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201),", "(httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected):", "False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], )", "False), ], ) def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect =", "], ) def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect", "pytest import respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200),", "\"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert request.called assert result", ") def test_url_authenticate_user(side_effect, expected): request = respx.post( 
\"https://auth.example.org/login\", ) request.side_effect = side_effect result", "import pytest import respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [", "<reponame>initfve/notify-server import httpx import pytest import respx from app import auth @respx.mock @pytest.mark.parametrize(", "True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False),", "httpx import pytest import respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\",", "(httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect", ") request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert request.called assert result is", "import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False),", "False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request = respx.post(", "(httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405),", "auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout,", "respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert request.called assert", "app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), 
(httpx.ConnectError,", "expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\")", "@respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False),", "(httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def", "(httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500),", "False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request", "True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False),", "import respx from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True),", "= respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert request.called", "(httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429),", "False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect,", "from app import auth @respx.mock @pytest.mark.parametrize( \"side_effect, expected\", [ (httpx.Response(200), True), (httpx.Response(201), True),", "@pytest.mark.parametrize( \"side_effect, expected\", [ 
(httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400),", "(httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request =", "(httpx.Response(500), False), (httpx.Response(429), False), ], ) def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\",", "(httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False), ],", "def test_url_authenticate_user(side_effect, expected): request = respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result =", "request = respx.post( \"https://auth.example.org/login\", ) request.side_effect = side_effect result = auth.url_authenticate_user(\"john\", \"secret\") assert", "[ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False),", "False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403), False), (httpx.Response(405), False), (httpx.Response(500), False), (httpx.Response(429), False),", "expected\", [ (httpx.Response(200), True), (httpx.Response(201), True), (httpx.ConnectError, False), (httpx.ConnectTimeout, False), (httpx.Response(400), False), (httpx.Response(403)," ]
[ "= settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ],", "\"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial,", "uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def", "\"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config =", "[ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa):", "# Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(),", "(1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start() # Assert", "make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is None", "mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act", ") timeout: int = 20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list ==", ") serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange 
uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent", "name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True)", "import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from", "test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\",", "], ) # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0), call(1),", "acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent =", "mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg", "quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import", "master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint:", "= zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\")", "mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list ==", "uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.start() #", "expected_acq_result[(0, i)] = (value, 0.0) # Assert assert not acq_result is None assert", "= mocker.patch(\"shutil.copy2\") # Act 
uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type", "int = 20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [ call(3),", "return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): #", "unittest.mock import call import numpy as np import pytest from zhinst import qcodes", "make_hdawg(mocker): def _make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG", "serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name =", "\"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings(", "uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings =", "list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout:", ") serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\",", "Repository: https://gitlab.com/quantify-os/quantify-scheduler # Licensed according to the LICENCE file on the master branch", "serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = 
mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name =", "make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy", "call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg:", "1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component", "str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name", "copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial,", "= name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer", "return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings,", "test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\",", "assert (0, 2) in acq_result for key in acq_result: assert acq_result[key] == expected_acq_result[key]", "hdawg._serial = serial hdawg.awgs = [None] * 4 for i in range(4): hdawg.awgs[i]", "\"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", 
return_value=[\"uhfqa0_awg0.csv\"])", "mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop() # Assert assert", "expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config = ZIDeviceConfig(", "def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig(", "def _make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA =", "expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\")", "zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial", "import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst", "# pylint: disable=redefined-outer-name from __future__ import annotations from typing import Any, Dict, Tuple", "zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa(", "serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\"))", "acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: 
zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\")", "import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule", "get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called()", "helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig,", "\"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result", "(0, 2) in acq_result for key in acq_result: assert acq_result[key] == expected_acq_result[key] def", "= mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()),", "ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker):", "in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\")", "mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type )", "hdawg.retrieve_acquisition() # Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg:", "uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20", "== [ call(0), 
call(1), call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def", "quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import (", "mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start() # Assert assert", "Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings", "\"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\")", "] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent", "timeout: int = 20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [", "hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): #", ") mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[])", "to the LICENCE file on the master branch # pylint: disable=missing-module-docstring # pylint:", ") -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial", "[ (0, mocker.Mock()), 
(1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act", "def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\",", "( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture", "apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings =", "\"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.start()", "# pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0:", "assert not acq_result is None assert (0, 2) in acq_result for key in", "settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\",", "@pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\")", "= (value, 0.0) # Assert assert not acq_result is None assert (0, 2)", "mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result =", "Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = 
hdawg.retrieve_acquisition() # Assert", "resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\",", "qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend", "= 20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [ call(3), call(2),", "(value, 0.0) # Assert assert not acq_result is None assert (0, 2) in", "\"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] =", "in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert assert not acq_result is", "test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\",", "(0, mocker.Mock()), ], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa):", "def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg,", "def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg,", "for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent =", "[ (0, mocker.Mock()), ], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker,", 
"hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ] for", "from unittest.mock import call import numpy as np import pytest from zhinst import", "mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act", "uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange", "from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import", "call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange", "call(1), call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): #", "= ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply =", "assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\",", "as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, )", "= ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\")", "# Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = 
make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int", "serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas =", "quantum analyzer setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True)", "hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument)", "(3, mocker.Mock()), ], ) timeout: int = 20 # Act hdawg.wait_done(timeout) # Assert", "branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name", "call(0), call(1), call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg):", "mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\",", "import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial: str ) ->", "uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\"", "mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas = [None] * 1", "mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\",", 
"serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config)", "# Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is None def test_hdawg_wait_done(mocker,", "get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), (1,", "not acq_result is None assert (0, 2) in acq_result for key in acq_result:", "\"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.stop()", "# Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"),", "i)] = (value, 0.0) # Assert assert not acq_result is None assert (0,", "mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop() #", "\"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()),", "mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type", "serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent =", "return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange", 
"quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import", "] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa):", "\"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg):", "Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ] for i in", "== [ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def", "value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert assert not acq_result", "on the master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring", "apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\")", "mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int = 20", "Licensed according to the LICENCE file on the master branch # pylint: disable=missing-module-docstring", "assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for i in range(4):", "], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange", "in acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent", "test_initialize_hdawg(make_hdawg): 
make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\")", "# Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i,", "is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy", "= make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) #", "uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield", ") # Act hdawg.start() # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1),", "Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config", "test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\")", "pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__", "(3, mocker.Mock()), ], ) # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [", "\"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is None def", "typing import Any, Dict, Tuple from pathlib import Path from unittest.mock import call", "make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings,", 
"mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert", "= make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is", "uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.stop() #", "call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg:", "2) in acq_result for key in acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker,", "\"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) #", "Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent =", "call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker,", "# Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\",", "instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def", "# pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name 
from __future__ import annotations from typing import", ") # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa:", "key in acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa:", "Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i, value", "acq_result is None assert (0, 2) in acq_result for key in acq_result: assert", "(1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop() # Assert", "serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") #", "# Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent", "return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config)", "<reponame>quantify-os/quantify-scheduler # Repository: https://gitlab.com/quantify-os/quantify-scheduler # Licensed according to the LICENCE file on the", "= zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def", "def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result =", "the quantum analyzer setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None,", "from quantify_scheduler.types import 
Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial: str", "def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa:", "test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(),", "Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\",", "* 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return", "= mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas = [None] *", "\"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\")", "# Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): #", "# Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ] for i", "mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) #", "Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent =", "def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: 
zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings(", "zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return", "yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent", "_make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent =", "(0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int =", "from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg(", "= mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg", "Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None", ") # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2),", "Assert assert not acq_result is None assert (0, 2) in acq_result for key", "= mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) #", "settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], )", "# Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = 
make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings", ") from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def", "] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent", "= mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True)", "make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg)", "hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()),", "annotations from typing import Any, Dict, Tuple from pathlib import Path from unittest.mock", "uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg =", "return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial:", "as np import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as", "assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\",", "component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker):", "mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act 
acq_result = uhfqa.retrieve_acquisition() expected_acq_result:", "ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\",", "Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg):", "# Licensed according to the LICENCE file on the master branch # pylint:", "zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 #", "make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA", "= settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.stop() # Assert", "from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components", "wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3,", "list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) #", "test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\",", "= make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data", "= name hdawg._serial = 
serial hdawg.awgs = [None] * 4 for i in", "LICENCE file on the master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring #", "# Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent", "= make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize =", "= dict() for i, value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) #", "_make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG,", "pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}),", "mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial hdawg.awgs = [None] * 4", "zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings", "(0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop()", "= hdawg.retrieve_acquisition() # Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange", "@pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\")", ") -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial", "mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() 
expected_acq_result: Dict[Tuple[int, int], Any]", "return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict()", "zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial", "i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\",", "return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\",", "hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy =", "hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\",", "def make_hdawg(mocker): def _make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg:", "= serial hdawg.awgs = [None] * 4 for i in range(4): hdawg.awgs[i] =", "acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i, value in", "component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str )", "acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\")", "# Act hdawg.wait_done(timeout) # Assert assert 
get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0),", "name hdawg._serial = serial hdawg.awgs = [None] * 4 for i in range(4):", "name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True)", "{0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path,", "uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\",", "Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers,", "i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg)", "# Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0,", "20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1),", "hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings(", "for key in acq_result: assert acq_result[key] == expected_acq_result[key] def 
test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange", "yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str ) ->", "_make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent:", "uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument", "uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config =", "= mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa", "make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config", "# Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def", "uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ],", "is None assert (0, 2) in acq_result for key in acq_result: assert acq_result[key]", "config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings,", "# pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from", "= 
uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i, value in enumerate(expected_data):", "(1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int = 20 #", "import Any, Dict, Tuple from pathlib import Path from unittest.mock import call import", "uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\",", ") # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa:", "disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import annotations from typing import Any, Dict,", "pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import annotations from", "qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial hdawg.awgs = [None]", "settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called()", "hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial hdawg.awgs =", "numpy as np import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers", "__future__ import annotations from typing import Any, Dict, Tuple from pathlib import Path", "hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config =", "(0, mocker.Mock()), ], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa):", "= mocker.patch.object(settings.ZISettings, 
\"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 =", "], ) # Act hdawg.start() # Assert assert get_awg_spy.call_args_list == [ call(3), call(2),", "\"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 # Act uhfqa.wait_done(timeout) #", "quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name:", "def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg,", "component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg:", "disable=redefined-outer-name from __future__ import annotations from typing import Any, Dict, Tuple from pathlib", "call import numpy as np import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst", "mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start() # Assert assert get_awg_spy.call_args_list ==", "make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act", "in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return", "-> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial =", "* 4 for i in range(4): hdawg.awgs[i] = 
mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg)", "def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\",", "according to the LICENCE file on the master branch # pylint: disable=missing-module-docstring #", "ZIAcquisitionConfig(1, {0: resolver}), ) mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\"))", "[ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int", "pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst", "def _make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG =", "settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from", "from __future__ import annotations from typing import Any, Dict, Tuple from pathlib import", "= np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\",", "range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy", "dict() for i, value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert", "def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = 
make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig(", "uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref,", "settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\")", "instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg):", "# the quantum analyzer setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] =", "hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act", "Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings =", "mocker.Mock()), ], ) timeout: int = 20 # Act hdawg.wait_done(timeout) # Assert assert", "_make_uhfqa( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA,", "config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply", "\"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int,", "from typing import Any, Dict, Tuple from pathlib import Path from 
unittest.mock import", "https://gitlab.com/quantify-os/quantify-scheduler # Licensed according to the LICENCE file on the master branch #", "mocker.Mock()), ], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): #", "# Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(),", "[None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa)", "= serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas", "mocker.Mock()), ], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): #", "mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int = 20 # Act hdawg.wait_done(timeout) #", "analyzer setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component", "= [None] * 4 for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component", "= mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial hdawg.awgs = [None] *", "mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings =", "Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent =", "i, value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert assert not", "-> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, 
instance=True) uhfqa.name = name uhfqa._serial =", "pylint: disable=redefined-outer-name from __future__ import annotations from typing import Any, Dict, Tuple from", "= settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker,", "import numpy as np import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import", "zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def", "zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from", "disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1, {0: resolver}), )", "Act uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings)", "Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for i in", "mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\")", "Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( 
f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings)", "zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(),", "mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name:", "return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str, serial: str", "call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg):", "# Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\")", "mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def", "= make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [", "get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called()", "make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa):", "ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def", "hdawg.name = name hdawg._serial = serial hdawg.awgs = [None] * 4 for i", "mocker.Mock()), ], ) # Act hdawg.start() # Assert assert 
get_awg_spy.call_args_list == [ call(3),", "mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture", "the master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring #", "serial hdawg.awgs = [None] * 4 for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG,", "= mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path,", "Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\")", "file on the master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint:", "str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") uhfqa: qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name", "call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): #", "make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 # Act uhfqa.wait_done(timeout)", "uhfqa.name = name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum", "import annotations from typing import Any, Dict, 
Tuple from pathlib import Path from", "Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def resolver(uhfqa): # pylint:", "(0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start()", "import Path from unittest.mock import call import numpy as np import pytest from", "from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig, ZIDeviceConfig, ) from quantify_scheduler.instrument_coordinator.components import zhinst from quantify_scheduler.types", "# Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() #", "[ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker,", "test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\")", "mocker.patch.object(settings.ZISettings, \"serialize\") mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config)", "np.ones(64) def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"),", "zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial:", "hdawg.awgs = [None] * 4 for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True)", "], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange", ") 
serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\")", "\"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") #", "uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\",", "from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import", "(3, mocker.Mock()), ], ) # Act hdawg.start() # Assert assert get_awg_spy.call_args_list == [", "instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the", "call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def test_hdawg_stop(mocker, make_hdawg): # Arrange", "(2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start() # Assert assert get_awg_spy.call_args_list", "settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called()", "return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int],", "str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name", "\"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = 
settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2, mocker.Mock()),", "\"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\")", "enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert assert not acq_result is None", "None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") #", "= settings.ZISettings( list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.start() # Assert", "apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result", "= make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize =", "wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 # Act uhfqa.wait_done(timeout) # Assert", "test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\",", "range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent", "Tuple from pathlib import Path from unittest.mock import call import numpy as np", "# Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent", "resolver(uhfqa): # pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", 
Schedule(\"test\"), settings.ZISettingsBuilder(), ZIAcquisitionConfig(1,", "# pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import annotations", "(2, mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list", "Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ]", "(2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int = 20 # Act hdawg.wait_done(timeout)", "== [ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).run.assert_called() def", "hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None )", "from pathlib import Path from unittest.mock import call import numpy as np import", "disable=missing-module-docstring # pylint: disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import", "Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()),", "mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name hdawg._serial = serial hdawg.awgs", "call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\",", "copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data", "== expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: 
zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done", "settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg:", "acq_result for key in acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): #", "hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() # Assert assert", "the LICENCE file on the master branch # pylint: disable=missing-module-docstring # pylint: disable=missing-class-docstring", "Any] = dict() for i, value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0)", "None assert (0, 2) in acq_result for key in acq_result: assert acq_result[key] ==", "quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial: str )", "range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component", "uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for", "Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ]", "in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa:", "= make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 # Act", "pathlib import Path from unittest.mock import 
call import numpy as np import pytest", "make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings", "Dict, Tuple from pathlib import Path from unittest.mock import call import numpy as", "\"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings =", "call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\")", "zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result", "hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent =", "Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str, serial: str ) -> zhinst.HDAWGInstrumentCoordinatorComponent:", "make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"),", "f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent", "component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\",", "# Repository: 
https://gitlab.com/quantify-os/quantify-scheduler # Licensed according to the LICENCE file on the master", "for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): #", "uhfqa.prepare(config) # Assert uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument)", "make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout:", "\"dev1234\") def test_hdawg_start(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy =", "import call import numpy as np import pytest from zhinst import qcodes from", "str ) -> zhinst.HDAWGInstrumentCoordinatorComponent: mocker.patch(\"qcodes.instrument.Instrument.record_instance\") hdawg: qcodes.HDAWG = mocker.create_autospec(qcodes.HDAWG, instance=True) hdawg.name = name", "], ) timeout: int = 20 # Act hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list", "\"get_instr\", return_value=hdawg) return component yield _make_hdawg @pytest.fixture def make_uhfqa(mocker): def _make_uhfqa( name: str,", "name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup", "\"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) # Assert", "in acq_result for key in acq_result: assert acq_result[key] == expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa):", "Act hdawg.start() # Assert assert 
get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ]", "make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0,", "\"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa)", "import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from", "instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0]", "def test_uhfqa_start(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings(", "# Assert uhfqa.instrument.awg.stop.assert_called() def test_uhfqa_prepare(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\")", "[None] * 4 for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component =", "get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout)", "apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2", "Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None", "Dict[Tuple[int, int], Any] = dict() for i, value in enumerate(expected_data): expected_acq_result[(0, i)] =", "f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) 
serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): #", "for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\",", ") serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act", "Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done = mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int =", "def test_hdawg_stop(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy = mocker.patch.object(hdawg,", "test_uhfqa_stop(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(),", "zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), ], )", "0.0) # Assert assert not acq_result is None assert (0, 2) in acq_result", "qcodes.UHFQA = mocker.create_autospec(qcodes.UHFQA, instance=True) uhfqa.name = name uhfqa._serial = serial uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG,", "pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import annotations from typing import Any,", "return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type )", "# Act uhfqa.prepare(config) # Assert 
uhfqa_serialize_settings = settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"),", "None def test_hdawg_wait_done(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") get_awg_spy =", "uhfqa.awg = mocker.create_autospec(qcodes.uhfqa.AWG, instance=True) # the quantum analyzer setup \"qas\" uhfqa.qas = [None]", "make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings,", "mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=uhfqa) return component yield _make_uhfqa def test_initialize_hdawg(make_hdawg): make_hdawg(\"hdawg0\", \"dev1234\") def test_hdawg_start(mocker,", "[ call(0), call(1), call(2), call(3), ] for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker,", "expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i, value in enumerate(expected_data): expected_acq_result[(0, i)]", "\"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data =", "4 for i in range(4): hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref,", "\"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings", "# Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for i", "Act acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg):", "instance=True) 
hdawg.name = name hdawg._serial = serial hdawg.awgs = [None] * 4 for", "range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config", "zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize", "def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64)", "import helpers as zi_helpers from quantify_scheduler.backends.zhinst import settings from quantify_scheduler.backends.zhinst_backend import ( ZIAcquisitionConfig,", "def resolver(uhfqa): # pylint: disable=unused-argument return expected_data config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(),", "ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings,", "for i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent =", "call(1), call(0), ] for i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def", "= mocker.patch.object(uhfqa.instrument.awg, \"wait_done\") timeout: int = 20 # Act uhfqa.wait_done(timeout) # Assert wait_done.assert_called_with(timeout)", "uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa:", "mocker.Mock()), (1, mocker.Mock()), (2, 
mocker.Mock()), (3, mocker.Mock()), ], ) # Act hdawg.start() #", "hdawg.wait_done(timeout) # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for", "mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act uhfqa.prepare(config) #", "= settings.ZISerializeSettings( f\"ic_{hdawg.instrument.name}\", hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange", "None ) serialize = mocker.patch.object(settings.ZISettings, \"serialize\") apply = mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") mocker.patch.object(zi_helpers,", "mocker.patch.object(hdawg, \"get_awg\", wraps=hdawg.get_awg) hdawg.zi_settings = settings.ZISettings( list(), [ (0, mocker.Mock()), (1, mocker.Mock()), (2,", "expected_acq_result[key] def test_uhfqa_wait_done(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") wait_done =", "# Assert assert not acq_result is None assert (0, 2) in acq_result for", "import zhinst from quantify_scheduler.types import Schedule @pytest.fixture def make_hdawg(mocker): def _make_hdawg( name: str,", "hdawg.awgs[i] = mocker.create_autospec(qcodes.hdawg.AWG, instance=True) component = zhinst.HDAWGInstrumentCoordinatorComponent(hdawg) mocker.patch.object(component.instrument_ref, \"get_instr\", return_value=hdawg) return component yield", "zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None ) serialize", "int], Any] = dict() for i, value in enumerate(expected_data): 
expected_acq_result[(0, i)] = (value,", "acq_result = hdawg.retrieve_acquisition() # Assert assert acq_result is None def test_hdawg_wait_done(mocker, make_hdawg): #", "make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"),", "disable=missing-class-docstring # pylint: disable=missing-function-docstring # pylint: disable=redefined-outer-name from __future__ import annotations from typing", "settings.ZISerializeSettings( f\"ic_{uhfqa.instrument.name}\", uhfqa.instrument._serial, uhfqa.instrument._type ) serialize.assert_called_with(Path(\".\"), uhfqa_serialize_settings) apply.assert_called_with(uhfqa.instrument) copy2.assert_called_with(\"uhfqa0_awg0.csv\", \"waves\") def test_uhfqa_retrieve_acquisition(mocker, make_uhfqa):", "assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3), ] for i in range(4):", "list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.start() # Assert uhfqa.instrument.awg.run.assert_called() def", "np import pytest from zhinst import qcodes from quantify_scheduler.backends.zhinst import helpers as zi_helpers", "for i, value in enumerate(expected_data): expected_acq_result[(0, i)] = (value, 0.0) # Assert assert", "setup \"qas\" uhfqa.qas = [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component =", "hdawg.instrument._serial, hdawg.instrument._type ) serialize.assert_called_with(Path(\".\"), hdawg_serialize_settings) apply.assert_called_with(hdawg.instrument) def test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent =", "uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") config = ZIDeviceConfig( \"hdawg0\", Schedule(\"test\"), settings.ZISettingsBuilder(), None )", "return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[\"uhfqa0_awg0.csv\"]) copy2 = mocker.patch(\"shutil.copy2\") # Act 
uhfqa.prepare(config) # Assert uhfqa_serialize_settings", "mocker.Mock()), (2, mocker.Mock()), (3, mocker.Mock()), ], ) timeout: int = 20 # Act", "mocker.Mock()), ], ) # Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0),", "i in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\",", "= mocker.patch.object(settings.ZISettings, \"apply\") mocker.patch(\"quantify_core.data.handling.get_datadir\", return_value=\".\") # Act hdawg.prepare(config) # Assert hdawg_serialize_settings = settings.ZISerializeSettings(", "test_hdawg_retrieve_acquisition(make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\") # Act acq_result = hdawg.retrieve_acquisition()", "uhfqa.retrieve_acquisition() expected_acq_result: Dict[Tuple[int, int], Any] = dict() for i, value in enumerate(expected_data): expected_acq_result[(0,", "i in range(4): hdawg.get_awg(i).wait_done.assert_called_with(timeout) def test_initialize_uhfqa(make_uhfqa): make_uhfqa(\"uhfqa0\", \"dev1234\") def test_uhfqa_start(mocker, make_uhfqa): # Arrange", "list(), [ (0, mocker.Mock()), ], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def", "make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") uhfqa.zi_settings = settings.ZISettings( list(), [", "= [None] * 1 uhfqa.qas[0] = mocker.create_autospec(None, instance=True) component = zhinst.UHFQAInstrumentCoordinatorComponent(uhfqa) mocker.patch.object(component.instrument_ref, \"get_instr\",", "test_uhfqa_retrieve_acquisition(mocker, make_uhfqa): # Arrange uhfqa: zhinst.UHFQAInstrumentCoordinatorComponent = make_uhfqa(\"uhfqa0\", \"dev1234\") expected_data = np.ones(64) def", "[ (0, mocker.Mock()), ], ) # Act uhfqa.stop() # Assert uhfqa.instrument.awg.stop.assert_called() def 
test_uhfqa_prepare(mocker,", "# Act hdawg.start() # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0),", "Any, Dict, Tuple from pathlib import Path from unittest.mock import call import numpy", "# Act hdawg.stop() # Assert assert get_awg_spy.call_args_list == [ call(0), call(1), call(2), call(3),", "in range(4): hdawg.get_awg(i).stop.assert_called() def test_hdawg_prepare(mocker, make_hdawg): # Arrange hdawg: zhinst.HDAWGInstrumentCoordinatorComponent = make_hdawg(\"hdawg0\", \"dev1234\")", "return_value=\".\") mocker.patch.object(zi_helpers, \"get_waves_directory\", return_value=Path(\"waves/\")) mocker.patch.object(Path, \"glob\", return_value=[]) uhfqa.prepare(config) # Act acq_result = uhfqa.retrieve_acquisition()", "hdawg.start() # Assert assert get_awg_spy.call_args_list == [ call(3), call(2), call(1), call(0), ] for", "Path from unittest.mock import call import numpy as np import pytest from zhinst" ]
[ "'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def", "import State from flask_login import current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback(", "'data')) def username(data): if data is None: return '' else: return f'Hello {data}'", "import Output from dash.dependencies import State from flask_login import current_user import pandas_datareader as", "def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data):", "Output from dash.dependencies import State from flask_login import current_user import pandas_datareader as pdr", "data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data", "import current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'),", "1), end=dt.now()) return { 'data': [{ 'x': df.index, 'y': df.Close }], 'layout': {'margin':", "datetime import datetime as dt from dash.dependencies import Input from dash.dependencies import Output", "20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args,", "'y': df.Close }], 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}", "Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017,", "dash.dependencies import Input from dash.dependencies import Output from dash.dependencies import State from flask_login", "{'l': 40, 'r': 0, 't': 20, 'b': 30}} } @dashapp.callback( 
Output('user-store', 'data'), Input('my-dropdown',", "def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data':", "cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if", "'data': [{ 'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40, 'r': 0,", "'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data):", "dash.dependencies import State from flask_login import current_user import pandas_datareader as pdr def register_callbacks(dashapp):", "1, 1), end=dt.now()) return { 'data': [{ 'x': df.index, 'y': df.Close }], 'layout':", "import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data'))", "update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{", "datetime as dt from dash.dependencies import Input from dash.dependencies import Output from dash.dependencies", "flask_login import current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown',", "start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{ 'x': df.index, 'y': df.Close }],", "Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username", "0, 't': 20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data'))", "pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{ 'x': df.index, 
'y': df.Close", "'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40, 'r': 0, 't': 20,", "current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data is None:", "register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df =", "data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{ 'x':", "from dash.dependencies import State from flask_login import current_user import pandas_datareader as pdr def", "Input('user-store', 'data')) def username(data): if data is None: return '' else: return f'Hello", "dt from dash.dependencies import Input from dash.dependencies import Output from dash.dependencies import State", "pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data):", "'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username',", "[{ 'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40, 'r': 0, 't':", "pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def", "'t': 20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def", "State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data'))", "dash.dependencies import Output from dash.dependencies import State from flask_login 
import current_user import pandas_datareader", "end=dt.now()) return { 'data': [{ 'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l':", "40, 'r': 0, 't': 20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'),", "Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'),", "df.Close }], 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}} }", "@dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data is None: return '' else:", "'children'), Input('user-store', 'data')) def username(data): if data is None: return '' else: return", "State from flask_login import current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph',", "}], 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}} } @dashapp.callback(", "= pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{ 'x': df.index, 'y':", "@dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value,", "'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store',", "@dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated: return", "Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1),", "if current_user.is_authenticated: return current_user.username 
@dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data is", "30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if", "return { 'data': [{ 'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40,", "current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data is None: return ''", "return current_user.username @dashapp.callback(Output('username', 'children'), Input('user-store', 'data')) def username(data): if data is None: return", "as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value,", "as dt from dash.dependencies import Input from dash.dependencies import Output from dash.dependencies import", "from dash.dependencies import Input from dash.dependencies import Output from dash.dependencies import State from", "{ 'data': [{ 'x': df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40, 'r':", "from datetime import datetime as dt from dash.dependencies import Input from dash.dependencies import", "from flask_login import current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'),", "'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return {", "} @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store', 'data')) def cur_user(args, data): if current_user.is_authenticated:", "def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df", "State('user-store', 'data')) def 
update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return", "'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now())", "Input from dash.dependencies import Output from dash.dependencies import State from flask_login import current_user", "df.index, 'y': df.Close }], 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b':", "import datetime as dt from dash.dependencies import Input from dash.dependencies import Output from", "'figure'), Input('my-dropdown', 'value'), State('user-store', 'data')) def update_graph(selected_dropdown_value, data): df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1,", "df = pdr.get_data_yahoo(selected_dropdown_value, start=dt(2017, 1, 1), end=dt.now()) return { 'data': [{ 'x': df.index,", "current_user import pandas_datareader as pdr def register_callbacks(dashapp): @dashapp.callback( Output('my-graph', 'figure'), Input('my-dropdown', 'value'), State('user-store',", "import Input from dash.dependencies import Output from dash.dependencies import State from flask_login import", "'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}} } @dashapp.callback( Output('user-store',", "'r': 0, 't': 20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'), Input('my-dropdown', 'value'), State('user-store',", "{'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}} } @dashapp.callback( Output('user-store', 'data'),", "from dash.dependencies import Output from dash.dependencies import State from flask_login import current_user import" ]
[ "verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True,", "verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self):", "optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return", "blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom singulier autre que", "optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255,", "def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue,", "from django.contrib.auth.models import User import datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model): name", "'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1',", "optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model):", "verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User,", "on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un", "return \"indice de {}\".format(self.player.username) #trouver un nom singulier autre que UserClue (déjà utilisé", "#question d'entrée optionnel 
(réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer =", "= models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username)", "models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas", "= models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player", "models from django.contrib.auth.models import User import datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model):", "= models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question d'entrée optionnel (réponse)", "à l'indice #question d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True)", "name = models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def __str__(self): return \"Message", "\"indice de {}\".format(self.player.username) #trouver un nom singulier autre que UserClue (déjà utilisé par", "django.contrib.auth.models import User import datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model): name =", "accéder à l'indice #question d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255,", "import models from django.contrib.auth.models import User import datetime now = datetime.datetime.now() \"\"\" class", "models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1 = models.TextField(verbose_name", "n'y a pas d'image paragraph1 = 
models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code =", "{}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien',", "models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom singulier autre", "(réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\",", "models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def __str__(self): return \"Message de {}\".format(self.name)", "a pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True,", "de {}\".format(self.player.username) #trouver un nom singulier autre que UserClue (déjà utilisé par django)", "= datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content =", "blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question d'entrée", "'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice", "de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default=", "import User import datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50)", "date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom", "class UserClues(models.Model): player = models.ForeignKey(User, 
on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now,", "class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si", "def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre')", "models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player =", "models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder", "= models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def __str__(self): return \"Message de", "\"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42,", "__str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom singulier autre que UserClue (déjà", "email = models.EmailField() content = models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\"", "models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42,", "<reponame>Raistlin11123/inquiry-soso<filename>mainapp/models.py<gh_stars>0 from django.db import models from django.contrib.auth.models import User import datetime now =", "content = models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title", "max_length=30) #code pour accéder à l'indice #question d'entrée optionnel (réponse) optional_question = 
models.CharField(null=True,", "blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class", "null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question", "max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title)", "= models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self):", "__str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE)", "pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\",", "models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True) def", "models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver", "datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField()", "now = datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content", "paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code", "django.db import models from django.contrib.auth.models import 
User import datetime now = datetime.datetime.now() \"\"\"", "return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel =", "= models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse optionelle\", max_length=255, blank=True)", "player = models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def", "pour accéder à l'indice #question d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\",", "= models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a", "il n'y a pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code", "d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30)", "l'indice #question d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer", "Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def __str__(self): return", "url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1", "__str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel", "return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue = 
models.ForeignKey(Clue, on_delete=models.CASCADE) date", "blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue =", "clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de", "models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question d'entrée optionnel (réponse) optional_question", "#code pour accéder à l'indice #question d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question", "datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content = models.TextField()", "= models.TextField(verbose_name = 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour", "optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = models.CharField(null=True, verbose_name=\"réponse", "title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y", "on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice", "si il n'y a pas d'image paragraph1 = models.TextField(verbose_name = 'paragraph1', null=True, blank=True)", "= models.EmailField() content = models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class", "= models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1 =", "User import datetime now = datetime.datetime.now() \"\"\" class 
Contact(models.Model): name = models.CharField(max_length=50) email", "max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue", "\"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def", "import datetime now = datetime.datetime.now() \"\"\" class Contact(models.Model): name = models.CharField(max_length=50) email =", "UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True)", "models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date = models.DateTimeField(default=now, blank=True) def __str__(self): return", "\"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE) clue = models.ForeignKey(Clue, on_delete=models.CASCADE) date =", "= models.DateTimeField(default=now, blank=True) def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom singulier", "code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question d'entrée optionnel", "optionelle\", max_length=255, blank=True) def __str__(self): return \"{}\".format(self.title) class UserClues(models.Model): player = models.ForeignKey(User, on_delete=models.CASCADE)", "def __str__(self): return \"indice de {}\".format(self.player.username) #trouver un nom singulier autre que UserClue", "class Contact(models.Model): name = models.CharField(max_length=50) email = models.EmailField() content = models.TextField() def __str__(self):", "d'entrée optionnel (réponse) optional_question = models.CharField(null=True, verbose_name=\"question optionelle\", max_length=255, blank=True) optional_answer = 
models.CharField(null=True,", "default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image paragraph1 = models.TextField(verbose_name =", "verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il n'y a pas d'image", "= 'paragraph1', null=True, blank=True) code = models.CharField(null=True, verbose_name=\"code\", max_length=30) #code pour accéder à", "verbose_name=\"code\", max_length=30) #code pour accéder à l'indice #question d'entrée optionnel (réponse) optional_question =", "from django.db import models from django.contrib.auth.models import User import datetime now = datetime.datetime.now()", "= models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model): title =", "\"\"\" class Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien", "models.EmailField() content = models.TextField() def __str__(self): return \"Message de {}\".format(self.name) \"\"\" class Clue(models.Model):", "Clue(models.Model): title = models.CharField(max_length=42, verbose_name='Titre') url_img_optionnel = models.CharField(max_length=42, default= 'rien', verbose_name='url_img_optionnel')#rien si il" ]
[ "learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats))", "= (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats)) learner =", ".label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats)) learner = unet_learner(data, torchvision.models.resnet50) learner.export('../export.pkl')", "torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch()", "import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34)", "from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner =", "<reponame>ncoop57/deep_parking<filename>model-ms/benchmark/make_fixture_models.py import torchvision from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data =", "data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda", "fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = 
ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data,", "unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data =", "= ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x:", "cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data", "(SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats)) learner = unet_learner(data,", "ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg',", "ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export()", "SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images')", "learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = 
(SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0,", "imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner = cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none()", ".split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats)) learner = unet_learner(data, torchvision.models.resnet50)", "torchvision from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats) learner", "data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2]) .databunch() .normalize(imagenet_stats)) learner", "= cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1,", "cnn_learner(data, torchvision.models.resnet34) learner.export() data = (SegmentationItemList.from_folder('fixtures/segmentation/images') .split_none() .label_from_func(lambda x: f'fixtures/segmentation/masks/{x.stem}.jpg', classes=[0, 1, 2])", "import torchvision from fastai.vision import ImageDataBunch, cnn_learner, unet_learner, SegmentationItemList, imagenet_stats data = ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats)" ]
[ "self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002", "vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price >", "= vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1 -", "0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01,", "self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level", "0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310,", "Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"]", "max_volume: volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量:", "= getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1 - self.reward_ratio * 0.99):", "0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0", "ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1:", "if type(user_account) is not AccountData: return False self.current_balance[vt_token] = user_account.balance return True def", "2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick =", "{one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) 
else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event()", "0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 +", "\"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction ==", "else max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_ask_volume =", "流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level", "market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level = num_level break if self.bid_order_level", "+ 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick,", "self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif", "{active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol", "self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level >", "ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume =", "310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables", "for num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price:", "self.market_vt_tokens: user_account = 
self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return False self.current_balance[vt_token] =", "cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return", "getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price =", "BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\",", "super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max =", "parse active_vt {active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR:", "\"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name: str, setting:", "+ self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask =", "* (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2", "<= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume =", "if 0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume = self.volume *", "self, algo_engine: BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) #", "f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)", "if bid_condition0 or (bid_condition1 and bid_condition2): 
self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume", "self.pos -= trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event()", "ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1", "= user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval},", "{self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume", "from _datetime import datetime, timedelta from enum import Enum import math import random", "import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\":", "if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price >", "< self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not", "= self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume", "one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def", "0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\")", "vt_bid_price if 0 < self.buy_max_volume < max_volume: 
max_volume = self.buy_max_volume min_volume = self.volume", "vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price *", "not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match", "> self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume", "one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level", "setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"]", "{vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume", "= 0 for num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if", "< (self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\")", "else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level in", "self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid:", "= 0 for num_level in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if", "self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 
0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price", "self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if", "self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if", "if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade:", "getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") #", "低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif", "= num_level break if self.bid_order_level > 0: total_bid_volume = 0 for num_level in", "for num_level in range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume", "{one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid ==", "cancel_bid: total_bid_volume = 0 for num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\")", "self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\"", "use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1)", "self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price 
> vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\")", "高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif", "self.last_tick: return if self.pos < self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos}", "one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"--->", "0 < ask_price < market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level =", "self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume =", "== 0.00000001 bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price", "\"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid =", "f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0", "self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume:", "= 0 for num_level in range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\")", "default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0,", "active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match = 
SYMBOL_SPLITTER.match(market_token_pair.upper()) if not", "self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if", "< max_volume: max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume >", "= 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price},", "token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\"", "{self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio >", "if 0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price =", "< vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price", "vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price", "= getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price", "False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def _update_current_balance(self):", "cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume /", "market_price vt_ask_price = 
round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price =", "= round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price,", "max_volume: volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量:", "f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price #", "one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\"", "self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active():", "return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\")", "{self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume", "> 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1", "[ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name: str,", "if not cancel_bid: total_bid_volume = 0 for num_level in range(1, 6): bid_price =", "self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\":", "(1 + self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) 
bid_condition8", "0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001:", "bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1", "= 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume =", "+= getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume =", "round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask", "0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001", "TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction", "Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine", "if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume", "return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def", "\"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\":", "self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event()", "self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, 
self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume <", "from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import", "import Decimal from _datetime import datetime, timedelta from enum import Enum import math", "self.min_order_volume volume = min_volume if not use_max_volume else max_volume * max_volume_ratio if volume", "] def __init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine,", "(ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume", "setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume", "self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <= self.price_offset_max assert 0", "* 0.99): self.bid_order_level = num_level break if self.bid_order_level > 0: total_bid_volume = 0", "[{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1", "getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price", "self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume", "use_max_volume else max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_bid_volume", "\"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict", "total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 
取消,因为之前的订单量发生了变化\") if cancel_bid:", "f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price", "2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price", "= total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume < max_volume:", "max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_ask_volume = round_to(volume", "in range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume:", "one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else", "bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume)", "collections import defaultdict from decimal import Decimal from _datetime import datetime, timedelta from", "random import re import requests import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility", "self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol,", "False self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max},", "self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def 
_init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\")", "else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid:", "self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0", "= \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\":", "False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask =", "Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\" self.write_log(\"停止 流动性挖矿\") #", "+= 1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance},", "取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick", "setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level =", "failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance()", "import defaultdict from decimal import Decimal from _datetime import datetime, timedelta from enum", "= True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price}", "if 
self.pos < self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\")", "if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio =", "(self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 -", "def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid", "* max_volume_ratio if volume >= max_volume: volume = max_volume self.last_bid_volume = round_to(volume -", "+ self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level", "import round_to from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData,", "validate setting assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <= 5 #", "* 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2):", "kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks >", "= 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume =", "display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1,", "assert self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price", "min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price", "= self.min_order_volume volume = min_volume if not use_max_volume else max_volume * 
max_volume_ratio if", "< self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count", "trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\" self.write_log(\"停止", "self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False)", "Enum import math import random import re import requests import time from vnpy.app.algo_trading", "= self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\":", "{one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if", "self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if", "if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == Direction.LONG:", "round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume)", "量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume", "0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price *", "algo_name: str, setting: dict ): \"\"\"\"\"\" 
super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol =", "= one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1},", "max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid ==", "True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价", "+ self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 =", "time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction,", "self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 #", "> {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume = 0 for num_level", "volume = min_volume if not use_max_volume else max_volume * max_volume_ratio if volume >=", "< self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\")", "kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks >", "return self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0", "流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\")", 
"self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <=", "-1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1 - self.reward_ratio", "= self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return False self.current_balance[vt_token] = user_account.balance return", "self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask:", "num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume", "ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) <", "< max_volume: max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume >", "self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level", "self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price =", "self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol)", "vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = {", "time to kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick}", "{self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume = 0 for num_level in", "self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = 
setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos", "bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1:", "volume >= max_volume: volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格:", "2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0,", "if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0", "> vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid", "self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count <", "> {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume = 0 for num_level", "self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price ==", "# if time to kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick >", "self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\"", "on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\")", "= setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= 
self.max_volume_ratio", "{ \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3,", "> (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid)", "self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count +=", "0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price *", "流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\":", "0 else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick,", "import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status, OrderType from", "= round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0", "self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market =", "return False self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset},", "self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick", "* self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume < max_volume: max_volume = self.buy_max_volume", "self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume < 
self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price},", "max_volume_ratio if volume >= max_volume: volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick,", "import requests import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from", "1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\")", "self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume:", "max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume > 0 and", "self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level in range(self.min_order_level, 0, -1): ask_price", "> self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume", "= self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) /", "量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\" self.write_log(\"停止 流动性挖矿\") # self.write_log(f\"账户状态:{self.algo_engine.main_engine.get_all_accounts()}\") time.sleep(5)", "= setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting", "当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if", "self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate", 
"self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self,", "(self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for", "self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\")", "SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False self.market_vt_tokens =", "+= getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else", "break if self.ask_order_level > 0: total_ask_volume = 0 for num_level in range(1, self.ask_order_level", "for num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price >", "== self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid", "\"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\":", "if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick +=", "if 0 < ask_price < market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level", "流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not", "高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True", "0 for num_level in 
range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0", "= setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio =", "Decimal from _datetime import datetime, timedelta from enum import Enum import math import", "= setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval)", "* 0.99): self.ask_order_level = num_level break if self.ask_order_level > 0: total_ask_volume = 0", "= 0 self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def", "0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000,", "self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\",", "OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class", "(self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if", "self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0]", "= round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price:", "or (ask_condition1 and 
ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume", "self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level = 0 for", "self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price", "self.ask_order_level = num_level break if self.ask_order_level > 0: total_ask_volume = 0 for num_level", "order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\"", "\"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick", "{ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price", "self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume < max_volume: max_volume =", "f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick,", "当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\":", "self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"]", "self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True", "{vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask = True 
self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume", "True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价", "\"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\":", "= setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos =", "(1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max *", "* (1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max", "= 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick =", "cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\")", "if time to kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel:", "def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1)", "True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self):", "0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick = None", "0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1 -", "= 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() 
self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER", "/ 2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level in range(self.min_order_level,", "setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <=", "import random import re import requests import time from vnpy.app.algo_trading import AlgoTemplate from", "self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market", "\"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2,", "= setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio =", "(1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8", "* (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0", "vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from", "self.ask_order_level = 0 for num_level in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\")", "self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level >", "self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, 
self.last_ask_volume) self.ask_order_alive_tick = 0 elif", "elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price,", "market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR:", "* max_volume_ratio if volume >= max_volume: volume = max_volume self.last_ask_volume = round_to(volume -", "\"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\":", "+= 1 # if time to kill cancel_ask = False if self.enable_ioc and", "user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return False self.current_balance[vt_token] = user_account.balance", "0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price =", "self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price", "market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick +=", "{self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert", "market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if", "= getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, 
f\"bid_price_{self.bid_order_level}\") if self.bid_order_level", "f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price =", "for num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price", "0 for num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 <", "total_bid_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume", "_datetime import datetime, timedelta from enum import Enum import math import random import", "= getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\")", "one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2:", "from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting =", "to kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks", "self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price =", "/ 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick", "to kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks", "range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1", "1 self.interval = 
setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"]", "\"\": self.bid_order_alive_tick += 1 # if time to kill cancel_bid = False if", "- self.reward_ratio * 0.99): self.bid_order_level = num_level break if self.bid_order_level > 0: total_bid_volume", "0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0", "<= 5 # Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid = \"\"", "if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if time to kill cancel_ask", "elif self.vt_ask_price > vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume -", "abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\")", "f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\")", "-= trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def", "else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\")", "self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio", "getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price", "= vt_ask_price > (self.origin_ask_price * 
(1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price", "= (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 +", "= self.volume * total_ask_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume", "0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0", "self.volume * total_ask_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume =", "= 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price =", "True if not cancel_bid: total_bid_volume = 0 for num_level in range(1, 6): bid_price", "\"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__(", "{market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {}", "self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume", "= getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\")", "one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level =", "trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif", "0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if not", "(1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max *", "(1 - 
self.reward_ratio * 0.99): self.bid_order_level = num_level break if self.bid_order_level > 0:", "2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick =", "cancel_ask: total_ask_volume = 0 for num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\")", "{self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume = 0 for num_level in", "{self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick =", "self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume = 0", "market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level =", "== 0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price", "range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume", "order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event()", "self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid !=", "\"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002", "not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if", "tick 
market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick", "if bid_price > market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level = num_level", "round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume)", "self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask =", "else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\")", "self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"--->", "f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account", "total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume", "* (1 + self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max))", "self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid", "_update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData:", "/ 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if time to", "- 
self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price", "6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume +=", "低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True", "tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if time", "\"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\":", "for num_level in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume", "f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price #", "total_ask_volume = 0 for num_level in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick,", "vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价", "< self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if not use_max_volume else max_volume", "< (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1", "if not cancel_ask: total_ask_volume = 0 for num_level in range(1, 6): ask_price =", "- total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if", "= getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\")", "defaultdict from decimal import Decimal from _datetime import datetime, timedelta from enum 
import", "self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos", "= one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if", "+ self.reward_ratio * 0.99): self.ask_order_level = num_level break if self.ask_order_level > 0: total_ask_volume", "order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\"", "symbol {market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance =", "self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT:", "= 0 for num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0", "OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所", "active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt", "in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return False self.current_balance[vt_token]", "vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价", "= setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume =", "0 <= self.min_order_level <= 
5 # Variables self.pos = 0 self.timer_count = 0", "2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level in range(self.min_order_level, 0,", "= num_level break if self.ask_order_level > 0: total_ask_volume = 0 for num_level in", "< ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\")", "vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status, OrderType", "self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume", "self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0", "self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match:", "self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price <", "self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price", "= vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price * (1 -", "f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\")", "= active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse", "def 
__init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name,", "self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio", "self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick", "failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair}", "min_volume = self.volume * total_bid_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume:", "self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\" self.write_log(\"停止 流动性挖矿\") # self.write_log(f\"账户状态:{self.algo_engine.main_engine.get_all_accounts()}\")", "> (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid)", "import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import", "\"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\":", "not AccountData: return False self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time())", "assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <= 5 # Variables self.pos", "not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio", "= 
one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 <", "bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume =", "self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额", "from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData", "= self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick =", "0 for num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price >", "(1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or", "setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\",", "ticks > {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume = 0 for", "str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"]", "> 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if", "= True if not cancel_ask: total_ask_volume = 0 for num_level in range(1, 6):", "= self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume > 0 and min_volume", "max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_bid_volume = round_to(volume", "vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * 
self.sell_max_ratio if", "0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price *", "self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price:", "Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import", "= \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price =", "= tick market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\":", "self.ask_order_alive_tick += 1 # if time to kill cancel_ask = False if self.enable_ioc", "self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else:", "self.interval) # validate setting assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <=", "datetime, timedelta from enum import Enum import math import random import re import", "2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if time to kill", "self.ask_order_level > 0 else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price", "volume >= max_volume: volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格:", "getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick,", "= self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume:", "= self.sell(self.vt_symbol, vt_ask_price, 
self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume:", "* total_bid_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume", "self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else:", "= 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level =", "0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume =", "self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price", "{one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self,", "AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name =", "= 0 def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos < self.min_pos", "def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not", "\"\": self.ask_order_alive_tick += 1 # if time to kill cancel_ask = False if", "max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume =", "1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\")", "= self.last_bid_price == 0.00000001 
bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price", "{self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick", "self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price ==", "if self.bid_order_level > 0 else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick)", "vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\"", "min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price # vt_ask_price =", "0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if time to kill", "持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0", "max_volume: max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume > 0", "self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for", "\"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002", "0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level = 0", "/ 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick", "if 
self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if", "{self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick >", "(self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1 +", ">= max_volume: volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price},", "self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price =", "= 0 for num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price", "= 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price", "\"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict ):", "self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume = min_volume", "{ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price", "!= self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick,", "self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return False self.current_balance[vt_token] = user_account.balance return True", "True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # 
self.ask_order_alive_tick = 0 if self.vt_bid_orderid", "= setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio =", "on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1) /", "0 def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos < self.min_pos or", "> vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) >", "getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick,", "vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price:", "range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price", "0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0,", "= 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume", "self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True", "import datetime, timedelta from enum import Enum import math import random import re", "vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price <", "0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) 
self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER =", "= 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume =", "0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\"", "from decimal import Decimal from _datetime import datetime, timedelta from enum import Enum", "tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1) / 2", "vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid =", "# vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price", "cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask = True", "= True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2):", "True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume},", "not use_max_volume else max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume", "vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price *", "self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 = one_ask_price", "= True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # 
self.bid_order_alive_tick = 0 def", "= setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos =", "0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\"", "= 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0 self.ask_order_level = 0 self.bid_order_level =", "bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price >", "token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False", "self.min_order_level <= 5 # Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid =", "self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if time", "self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick = tick market_price =", "vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price * (1 - self.price_offset))", "Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0", "> market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level = num_level break if", "= \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction", "timedelta from enum import Enum import math import random import re import requests", "+ 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick,", "bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) <", "self.pos < self.min_pos or self.pos > self.max_pos: 
self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return", "ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]]", "abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\")", "self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos +=", "self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return", "= setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level", "self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume", "0.00000001 bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price *", "import Enum import math import random import re import requests import time from", "setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1 self.interval =", "getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price # vt_ask_price = round_to(min_ask_price +", "one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0", "if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt 
{active_vt_symbol} failed\") return False", "= setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert", "AccountData: return False self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿:", "return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 +", "- self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2))", "1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓", "bid_price > market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level = num_level break", "num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price <", "* (1 + self.reward_ratio * 0.99): self.ask_order_level = num_level break if self.ask_order_level >", "setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"]", "vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset))", "{vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"--->", "min_volume = self.volume * total_ask_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume:", "self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price}", "if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if 
not self.last_tick:", "class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\":", "range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price", "max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume < max_volume:", "elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0", "cancel_ask = True if not cancel_ask: total_ask_volume = 0 for num_level in range(1,", "if not use_max_volume else max_volume * max_volume_ratio if volume >= max_volume: volume =", "= getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price # vt_bid_price = round_to(max_bid_price", "0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid", "self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume <", "= active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\")", "# self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if", "# self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos", "self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance():", "self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price <", "num_level in range(1, self.bid_order_level + 1): total_bid_volume += 
getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume !=", "* (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0", "超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count", "= 0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid =", "type(user_account) is not AccountData: return False self.current_balance[vt_token] = user_account.balance return True def on_start(self):", "> self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count", "f\"ask_price_{num_level}\") if 0 < ask_price < market_price * (1 + self.reward_ratio * 0.99):", "self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price}", "0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002:", "self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume = self.volume", "0: total_ask_volume = 0 for num_level in range(1, self.ask_order_level + 1): total_ask_volume +=", "} variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine:", "ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick,", "vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio /", "max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: 
{self.last_bid_volume}\") self.vt_bid_orderid =", "> self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level", "if volume >= max_volume: volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick)", "* (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2", "= getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price +", "5 # Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price", "cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0", "cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price < vt_bid_price: cancel_bid = True", "{vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8", "\"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\":", "self.price_offset_max assert 0 <= self.min_order_level <= 5 # Variables self.pos = 0 self.timer_count", "# Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume =", "> 0: total_bid_volume = 0 for num_level in range(1, self.bid_order_level + 1): total_bid_volume", "assert 0 <= self.min_order_level <= 5 # Variables self.pos = 0 self.timer_count =", "= getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price * (1 + self.reward_ratio", "one_ask_volume: 
{one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid", "self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price", "= None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair", "if self.bid_order_level > 0: total_bid_volume = 0 for num_level in range(1, self.bid_order_level +", "self.bid_order_level = 0 for num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\")", "getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price", "self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2", "(tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 #", "variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine,", "None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair =", "= [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name:", "= [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] 
self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token", "total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0", "= getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price", "vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price:", "not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def on_trade(self, trade: TradeData):", "active_vt {active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse", "{self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self,", "+ self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1", "* (1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max", "= SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False self.market_vt_tokens", "or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1", "vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not 
AccountData: return False", "max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price", "< vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask", "time to kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick}", "self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio = self.max_volume_ratio market_price =", "0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume =", "bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1 - self.reward_ratio *", "if bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\")", "> 0 else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price =", "else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level = 0", "self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos <", "0 < ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick,", "= 0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\")", "getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else 
market_price # vt_bid_price = round_to(max_bid_price -", "self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0)", "max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume > 0 and", "< market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level = num_level break if", "取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if not", "(self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if", "\"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables =", "= setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume =", "- self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick", "- self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1", "setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) #", "> 0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price ==", "total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume <", "if self.ask_order_level > 0: total_ask_volume = 0 for num_level 
in range(1, self.ask_order_level +", "False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper()) if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return", "self.reward_ratio * 0.99): self.ask_order_level = num_level break if self.ask_order_level > 0: total_ask_volume =", "{bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price", "self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol,", "< self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level", "if 0 < self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume = self.volume *", "self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume > 0 and min_volume <", "self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1", "1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\",", "or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match = SYMBOL_SPLITTER.match(market_token_pair.upper())", "self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level in range(self.min_order_level, 0, -1): bid_price", "= self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick: TickData):", "* (1 - self.reward_ratio * 0.99): self.bid_order_level = num_level break if self.bid_order_level >", 
"self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is", "= True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2):", "self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume", "0: total_bid_volume = 0 for num_level in range(1, self.bid_order_level + 1): total_bid_volume +=", "{vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"--->", "self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level", "= (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0", "False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid =", "self.put_variables_event() def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1]", "(self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1 -", "= self.volume * total_bid_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume", "self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token)", "total_ask_volume max_volume 
= self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume", "self.bid_order_level = num_level break if self.bid_order_level > 0: total_bid_volume = 0 for num_level", "* (1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max))", "self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume <", "= total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume", "0 self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self,", "# validate setting assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <= 5", "(1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2 =", "else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if", "in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume:", "one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if", "= True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price}", "f\"bid_price_{num_level}\") if bid_price > market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level =", "= setting[\"volume\"] 
self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0 <= self.max_volume_ratio <= 1 self.interval", "= getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price # vt_ask_price = round_to(min_ask_price", ">= max_volume: volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price},", "self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if not use_max_volume else max_volume *", "self.reward_ratio * 0.99): self.bid_order_level = num_level break if self.bid_order_level > 0: total_bid_volume =", "+ self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 =", "self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume", "return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume},", "self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset", "import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name", "\"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid", "0) assert 0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"]", "> 0 else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price =", "self.vt_ask_price < vt_ask_price: cancel_ask = True 
self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price:", "= True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if", "max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price # vt_bid_price =", "{vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8", "- self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid =", "# if time to kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick >", "self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if time to kill cancel_ask =", "< ask_price < market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level = num_level", "self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price =", "round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid", "elif self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume -", "self.volume * total_bid_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume =", "self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid ==", "self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额:", "# vt_bid_price = 
round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price", "(self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset))", "bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if", "self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 =", "not cancel_ask: total_ask_volume = 0 for num_level in range(1, 6): ask_price = getattr(tick,", "self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] *", "num_level in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume !=", "range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price", "= getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price", "流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price", "setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"]", "= min_volume if not use_max_volume else max_volume * max_volume_ratio if volume >= max_volume:", "Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == Direction.LONG: 
self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos", "for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account) is not AccountData: return", "if self.ask_order_level > 0 else market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)", "getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price * (1 + self.reward_ratio *", "round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 =", "1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1,", "\"\": self.ask_order_level = 0 for num_level in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick,", "self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price <", "self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <= 5 # Variables self.pos =", "0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol,", "and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if not", "= 0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol)", "volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\")", "if not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False 
self.market_vt_tokens = [", "num_level break if self.bid_order_level > 0: total_bid_volume = 0 for num_level in range(1,", "condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price =", "cancel_bid = True if not cancel_bid: total_bid_volume = 0 for num_level in range(1,", "self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active():", "\"\": self.bid_order_level = 0 for num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick,", "TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting", "setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <= self.price_offset_max", "\"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\":", "if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level in range(self.min_order_level, 0, -1):", "self.vt_ask_price > vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume)", "if self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level in range(self.min_order_level, 0, -1):", "break if self.bid_order_level > 0: total_bid_volume = 0 for num_level in range(1, self.bid_order_level", "trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self):", "< self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") 
self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\")", "self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"--->", "or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume", "(1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or", "ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price * (1 +", "= 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price =", "< self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume if", "self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume = 0", "# Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price =", "volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\")", "0.99): self.ask_order_level = num_level break if self.ask_order_level > 0: total_ask_volume = 0 for", "self.put_variables_event() def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos", "self.bid_order_level > 0 else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price", "self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = 
setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc", "f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick,", "in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if 0 < ask_price < self.last_ask_price:", "= 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if time to", "= max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid", "self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\"", "total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0", "min_volume if not use_max_volume else max_volume * max_volume_ratio if volume >= max_volume: volume", "(self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 +", "bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]]", "elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\"", "self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price", "50000, \"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def", "setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level", "setting[\"min_order_level\"] self.min_order_volume = 
setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"]", "{self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event() return", "= self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume < max_volume: max_volume", "one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData):", "and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if not use_max_volume", "self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order:", "vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price *", "self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if", "getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick,", "on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume", "import math import random import re import requests import time from vnpy.app.algo_trading import", "= one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1},", "流动性挖矿卖出下单失败,因为没有合适的下单位置\") if 
self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level in range(self.min_order_level, 0,", "(self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask: self.cancel_order(self.vt_ask_orderid) #", "\"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction ==", "OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price", "0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0,", "TickData): \"\"\"\"\"\" self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if", "ask_price < market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level = num_level break", "AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object", "{} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if type(user_account)", "getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price * (1 - self.reward_ratio * 0.99): self.bid_order_level", "<= self.price_offset_max assert 0 <= self.min_order_level <= 5 # Variables self.pos = 0", "setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\",", "max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid =", "0.0 self.ask_order_level = 0 self.bid_order_level = 0 self.last_tick 
= None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event()", "self.ask_order_level > 0: total_ask_volume = 0 for num_level in range(1, self.ask_order_level + 1):", "from collections import defaultdict from decimal import Decimal from _datetime import datetime, timedelta", "< self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume if", "if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if time to kill cancel_bid", "range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += getattr(tick,", "self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ]", "getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price", "self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count < self.interval:", "num_level break if self.ask_order_level > 0: total_ask_volume = 0 for num_level in range(1,", "= (self.last_bid_price * (1 - self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 +", "self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"--->", "= setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <=", "6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += getattr(tick, f\"bid_volume_{num_level}\")", "self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = 
(self.last_bid_price * (1", "market_price * (1 + self.reward_ratio * 0.99): self.ask_order_level = num_level break if self.ask_order_level", "> (self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\")", "= setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <= self.price_offset_max assert 0 <=", "self.last_tick = tick market_price = (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid !=", "ask_condition2 = vt_ask_price > (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 = one_ask_price <", "total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume", "min_volume < self.min_order_volume: min_volume = self.min_order_volume volume = min_volume if not use_max_volume else", "<= self.min_order_level <= 5 # Variables self.pos = 0 self.timer_count = 0 self.vt_ask_orderid", "min_volume = self.min_order_volume volume = min_volume if not use_max_volume else max_volume * max_volume_ratio", "流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为卖单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\")", "f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price =", "0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume = 0.0", "\"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\":", "self.bid_order_level > 0: total_bid_volume = 0 for num_level in range(1, self.bid_order_level + 1):", 
"self.bid_order_level = 0 self.last_tick = None self._init_market_accounts(self.vt_symbol) self.subscribe(self.vt_symbol) self.put_parameters_event() self.put_variables_event() def _init_market_accounts(self, active_vt_symbol):", "if time to kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel:", "< (self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1", "\"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\",", "= round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price,", "in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume +=", "elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price}", "\"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\":", "< (self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price > (self.origin_ask_price * (1", "= vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price", "== 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price", "3, \"min_order_level\": 1, \"min_order_volume\": 0, \"sell_max_volume\": 0, \"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1,", "+= getattr(tick, f\"bid_volume_{num_level}\") # max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else", "{self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = 
self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def", "= round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price:", "TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\"", "active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair", "+= getattr(self.last_tick, f\"bid_volume_{num_level}\") if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume =", "self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio", "setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"]", "self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] *", "and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume) else:", "trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price},", "False) 
self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert self.price_offset <= self.price_offset_max assert", "self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick: TickData): \"\"\"\"\"\" self.last_tick", "if not self.last_tick: return if self.pos < self.min_pos or self.pos > self.max_pos: self.cancel_all()", "if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1", "self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price", "= 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price =", "* 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2):", "== Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos += trade.volume self.put_variables_event() def on_stop(self): \"\"\"\"\"\" self.write_log(\"停止 流动性挖矿\")", "== 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 = (self.last_bid_price", "self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0 def on_tick(self, tick:", "self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price * (1 + self.price_offset_max)) bid_condition8 = one_bid_price", "def on_trade(self, trade: TradeData): \"\"\"\"\"\" if trade.direction == Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -=", "= setting[\"buy_max_ratio\"] self.reward_ratio = 
setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc =", "{bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price", "one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume", "max_volume: max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume > 0", "* self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume =", "num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price", "= round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0", "(1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2 =", "0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price *", "BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol", "not cancel_bid: total_bid_volume = 0 for num_level in range(1, 6): bid_price = getattr(tick,", "def on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos < self.min_pos or self.pos", "{self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume", "if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") 
one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price", "if volume >= max_volume: volume = max_volume self.last_bid_volume = round_to(volume - self.volumetick, self.volumetick)", "and one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else:", "from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine import BaseEngine class LiquidMiningAlgo(AlgoTemplate):", "= 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume =", "requests import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant", "- total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if", "__init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting)", "self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick", "self.vt_bid_price < vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume)", "return if self.pos < self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos},", "in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price > market_price *", "SYMBOL_SPLITTER = 
re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or", "self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\")", "f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0", "market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return False token_pair_match =", "self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001", "setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel =", "total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price =", "total_bid_volume = 0 for num_level in range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick,", "one_bid_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿卖出高价one_bid_price: {one_bid_price}, one_bid_volume: {one_bid_volume}\") self.sell(self.vt_symbol, one_bid_price, one_bid_volume) else: self.write_log(f\"--->", "= { \"vt_symbol\": \"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\":", "/ vt_bid_price if 0 < self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume =", "self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio / 
vt_bid_price if 0 <", "self.write_log(f\"---> 流动性挖矿卖出下单失败,因为没有合适的下单位置\") if self.vt_bid_orderid == \"\": self.bid_order_level = 0 for num_level in range(self.min_order_level,", "\"min_pos\": 50000, \"max_pos\": 50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ]", "total_ask_volume = 0 for num_level in range(1, 6): ask_price = getattr(tick, f\"ask_price_{num_level}\") if", "\"\", \"price_offset\": 0.05, \"price_offset_max\": 0.1, \"volume\": 2, \"max_volume_ratio\": 0, \"interval\": 3, \"min_order_level\": 1,", "self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price = 0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume", "= False if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask", "0 else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) vt_bid_price = getattr(tick,", "_init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not", "ask_price < self.last_ask_price: total_ask_volume += getattr(tick, f\"ask_volume_{num_level}\") # min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if", "algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"]", "setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"] self.reward_ratio = setting[\"reward_ratio\"]", "if self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if", 
"50000, } variables = [ \"pos\", \"timer_count\", \"vt_ask_orderid\", \"vt_bid_orderid\" ] def __init__( self,", "self.write_log(f\"当前持仓: {self.pos} 超出[{self.min_pos}, {self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event()", "re import requests import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to", "0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol,", "(self.last_ask_price * (1 - self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset))", "\"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price =", "self.min_order_volume = setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio", "vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price", "one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2:", "self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick =", "round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 =", "active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol} failed\") return", "active_market = 
active_vt_symbol.split('.')[1] if not market_token_pair or not active_market: self.algo_engine.main_engine.write_log(f\"ERROR: parse active_vt {active_vt_symbol}", "round_to from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData,", "cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\")", "if self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1", "# max_bid_price = getattr(tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price # vt_bid_price", "= setting[\"reward_ratio\"] self.min_pos = setting[\"min_pos\"] self.max_pos = setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel", "= vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume = self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio", "= \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001 self.last_ask_price =", "\"\"\"\"\"\" if not self.last_tick: return if self.pos < self.min_pos or self.pos > self.max_pos:", "f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick)", "self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick =", "f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price", "from enum import Enum import math import random import re import 
requests import", "market_price # vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) vt_ask_price = getattr(tick, f\"ask_price_1\") if", "量: {self.last_ask_volume}\") self.vt_ask_orderid = self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and", "max_volume_ratio if volume >= max_volume: volume = max_volume self.last_ask_volume = round_to(volume - self.volumetick,", "math import random import re import requests import time from vnpy.app.algo_trading import AlgoTemplate", "re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or not active_market:", "in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price <", "if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid:", "< vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price < (self.origin_bid_price", "+ self.price_offset_max)) bid_condition8 = one_bid_price > (self.origin_bid_price * (1 + self.price_offset_max * 2))", "流动性挖矿买入下单失败,因为买单总数量等于上一单数量\") else: self.write_log(f\"---> 流动性挖矿买入下单失败,因为没有合适的下单位置\") self.put_variables_event() def on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid ==", "total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price =", "total_ask_volume) > (self.total_ask_volume / 2): cancel_ask = True self.write_log(f\"---> 当前卖单{self.vt_ask_price} 取消,因为之前的订单量发生了变化\") if cancel_ask:", "self.ask_order_level > 0 else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price", "getattr(self.last_tick, f\"bid_price_1\") one_bid_volume 
= getattr(self.last_tick, f\"bid_volume_1\") max_bid_price = getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level >", "self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if", "self.bid_order_alive_tick += 1 # if time to kill cancel_bid = False if self.enable_ioc", "on_timer(self): \"\"\"\"\"\" if not self.last_tick: return if self.pos < self.min_pos or self.pos >", "{self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick > 0", "self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and one_bid_volume < self.auto_trade_volume: self.write_log(f\"--->", "not token_pair_match: self.algo_engine.main_engine.write_log(f\"ERROR: parse symbol {market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\",", "-1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price < market_price * (1", "{self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio > 0 max_volume_ratio", "= \"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid", "elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price,", "== Direction.SHORT: self.write_log(f\"流动性挖矿卖单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\") self.pos -= trade.volume elif trade.direction == 
Direction.LONG: self.write_log(f\"流动性挖矿买单{trade.vt_orderid}成交,价:{trade.price}, 量:{trade.volume}\")", "one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else", "getattr(self.last_tick, f\"ask_volume_{num_level}\") if total_ask_volume != self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick,", "\"buy_max_volume\": 0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\":", "\"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, }", "= re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if not market_token_pair or not", "] self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account =", "0 for num_level in range(1, self.bid_order_level + 1): total_bid_volume += getattr(self.last_tick, f\"bid_volume_{num_level}\") if", "= getattr(self.last_tick, f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price vt_bid_price = round_to(max_bid_price -", "True if not cancel_ask: total_ask_volume = 0 for num_level in range(1, 6): ask_price", "(bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume", "== \"\": self.ask_order_level = 0 for num_level in range(self.min_order_level, 0, -1): ask_price =", "* total_ask_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume", "if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = 
self.min_order_volume volume =", "if self.timer_count < self.interval: self.put_variables_event() return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\")", "> 0: total_ask_volume = 0 for num_level in range(1, self.ask_order_level + 1): total_ask_volume", "use_max_volume else max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_ask_volume", "else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price", "def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume},", "= {} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens: user_account = self.algo_engine.main_engine.get_account(vt_token) if", "= 0 for num_level in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\")", "0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume = self.volume * total_ask_volume", "cancel_ask: self.cancel_order(self.vt_ask_orderid) # self.ask_order_alive_tick = 0 if self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1", "self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price >", "not self.last_tick: return if self.pos < self.min_pos or self.pos > self.max_pos: self.cancel_all() self.write_log(f\"当前持仓:", "流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price", "= vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume = self.current_balance[self.market_vt_tokens[1]] * self.buy_max_ratio", 
"dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset =", "order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif", "total_bid_volume = 0 for num_level in range(1, 6): bid_price = getattr(tick, f\"bid_price_{num_level}\") if", "[ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token in", "): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"]", "self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price},", "from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status,", "0, \"auto_trade_volume\": 310, \"sell_max_ratio\": 1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000,", "self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1 - self.price_offset_max * 2)) self.write_log(f\"--->", "= 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\")", "setting[\"max_pos\"] self.enable_ioc = setting.get(\"enable_ioc\", False) self.ioc_intervel = setting.get(\"ioc_interval\", self.interval) # validate setting assert", "0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.origin_ask_price = 0.00000002 self.origin_bid_price = 0.00000001", "1 # if time to kill cancel_ask = False if self.enable_ioc and self.ask_order_alive_tick", "and self.ask_order_alive_tick > self.ioc_intervel: 
self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True if not", "\"vt_bid_orderid\" ] def __init__( self, algo_engine: BaseEngine, algo_name: str, setting: dict ): \"\"\"\"\"\"", "self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume > 0 and min_volume <", "= False if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid", "self.pos = 0 self.timer_count = 0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid", "cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0", "vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume", "vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume", "self.last_tick.bid_price_1) / 2 if self.vt_ask_orderid == \"\": self.ask_order_level = 0 for num_level in", "enum import Enum import math import random import re import requests import time", "self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and", "0 < self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume", "!= self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick,", "self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: 
{self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0", "ticks > {self.ioc_intervel},取消\") cancel_ask = True if not cancel_ask: total_ask_volume = 0 for", "elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price}", "self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume = self.volume * total_bid_volume if self.min_order_volume", "setting assert self.price_offset <= self.price_offset_max assert 0 <= self.min_order_level <= 5 # Variables", "= self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1 - self.price_offset)) < vt_ask_price", "assert 0 <= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume", "= self.sell_max_volume min_volume = self.volume * total_ask_volume if self.min_order_volume > 0 and min_volume", "user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, {self.interval}, {self.min_order_level},", "setting: dict ): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset", "getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level >", "vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price > vt_ask_price: cancel_ask =", "- self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿买入价格: {vt_bid_price}, 量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) 
self.bid_order_alive_tick", "{self.max_pos}]范围,停止流动性挖矿\") return self.timer_count += 1 if self.timer_count < self.interval: self.put_variables_event() return self.timer_count =", "self.current_balance[self.market_vt_tokens[0]] * self.sell_max_ratio if 0 < self.sell_max_volume < max_volume: max_volume = self.sell_max_volume min_volume", "> (self.origin_ask_price * (1 - self.price_offset_max)) ask_condition8 = one_ask_price < (self.origin_ask_price * (1", "0 for num_level in range(1, self.ask_order_level + 1): total_ask_volume += getattr(self.last_tick, f\"ask_volume_{num_level}\") if", "0 for num_level in range(self.min_order_level, 0, -1): bid_price = getattr(self.last_tick, f\"bid_price_{num_level}\") if bid_price", "self.origin_bid_price == 0.00000001: self.origin_bid_price = vt_bid_price bid_condition0 = self.last_bid_price == 0.00000001 bid_condition1 =", "# min_ask_price = getattr(tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level > 0 else market_price # vt_ask_price", "0 self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0", "(self.total_bid_volume / 2): cancel_bid = True self.write_log(f\"---> 当前买单{self.vt_bid_price} 取消,因为之前的订单量发生了变化\") if cancel_bid: self.cancel_order(self.vt_bid_orderid) #", "0.99): self.bid_order_level = num_level break if self.bid_order_level > 0: total_bid_volume = 0 for", "self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True if not cancel_bid: total_bid_volume =", "setting[\"price_offset\"] self.price_offset_max = setting[\"price_offset_max\"] self.volume = setting[\"volume\"] self.max_volume_ratio = setting.get(\"max_volume_ratio\", 0) assert 0", "> 0 max_volume_ratio = self.max_volume_ratio market_price = (self.last_tick.ask_price_1 + self.last_tick.bid_price_1) / 2 if", "self.cancel_order(self.vt_bid_orderid) # self.bid_order_alive_tick = 0 def on_timer(self): \"\"\"\"\"\" if 
not self.last_tick: return if", "!= \"\": self.bid_order_alive_tick += 1 # if time to kill cancel_bid = False", "< vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) >", "order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if not", "decimal import Decimal from _datetime import datetime, timedelta from enum import Enum import", "def _init_market_accounts(self, active_vt_symbol): SYMBOL_SPLITTER = re.compile(r\"^(\\w+)[-:/]?(BTC|ETH|BNB|XRP|USDT|USDC|USDS|TUSD|PAX|DAI)$\") market_token_pair = active_vt_symbol.split('.')[0] active_market = active_vt_symbol.split('.')[1] if", "<= self.max_volume_ratio <= 1 self.interval = setting[\"interval\"] self.min_order_level = setting[\"min_order_level\"] self.min_order_volume = setting[\"min_order_volume\"]", "self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\") return use_max_volume = self.max_volume_ratio", "LiquidMiningAlgo(AlgoTemplate): \"\"\"\"\"\" display_name = \"交易所 流动性挖坑\" default_setting = { \"vt_symbol\": \"\", \"price_offset\": 0.05,", "> 0 else market_price vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price ==", "if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif self.vt_bid_price <", "return self.timer_count = 0 self.write_log(f\"当前余额 {self.current_balance}, 持仓 {self.pos}\") if not self._update_current_balance(): self.write_log(f\"查询余额失败,上次余额: [{self.current_balance}]\")", "= max_volume self.last_ask_volume = round_to(volume - self.volumetick, self.volumetick) self.write_log(f\"流动性挖矿卖出价格: {vt_ask_price}, 量: {self.last_ask_volume}\") self.vt_ask_orderid", "market_price 
vt_bid_price = round_to(max_bid_price - self.pricetick, self.pricetick) if self.origin_bid_price == 0.00000001: self.origin_bid_price =", "!= \"\": self.ask_order_alive_tick += 1 # if time to kill cancel_ask = False", "self.last_ask_volume: one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\")", "- self.price_offset)) < vt_ask_price < (self.last_ask_price * (1 + self.price_offset)) ask_condition2 = vt_ask_price", "else max_volume * max_volume_ratio if volume >= max_volume: volume = max_volume self.last_bid_volume =", "num_level in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 < ask_price", "\"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables = [ \"pos\",", "parse symbol {market_token_pair} failed\") return False self.market_vt_tokens = [ f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance", "f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif self.vt_ask_price", "ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"---> 流动性挖矿买入低价one_ask_price: {one_ask_price}, one_ask_volume: {one_ask_volume}\") self.buy(self.vt_symbol, one_ask_price, one_ask_volume)", "if total_bid_volume != self.last_bid_volume: one_bid_price = getattr(self.last_tick, f\"bid_price_1\") one_bid_volume = getattr(self.last_tick, f\"bid_volume_1\") max_bid_price", "True self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid", "setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] 
self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio = setting[\"sell_max_ratio\"] self.buy_max_ratio = setting[\"buy_max_ratio\"]", "== self.vt_bid_orderid: if not order.is_active(): self.vt_bid_orderid = \"\" self.vt_bid_price = 0.0 self.put_variables_event() def", "self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price == 0.00000002 ask_condition1 = (self.last_ask_price * (1", "one_ask_price = getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if", "- self.price_offset)) < vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price", "量: {self.last_bid_volume}\") self.vt_bid_orderid = self.buy(self.vt_symbol, vt_bid_price, self.last_bid_volume) self.bid_order_alive_tick = 0 elif bid_condition8 and", "+ self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price = vt_ask_price ask_condition0 = self.last_ask_price", "cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume /", "True self.write_log(f\"当前卖单{self.vt_ask_price} 高于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif abs(self.total_ask_volume - total_ask_volume) > (self.total_ask_volume / 2): cancel_ask", "if self.enable_ioc and self.ask_order_alive_tick > self.ioc_intervel: self.write_log(f\"卖单{self.vt_ask_orderid}有效时间{self.ask_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_ask = True", "(1 + self.reward_ratio * 0.99): self.ask_order_level = num_level break if self.ask_order_level > 0:", "import Direction, Status, OrderType from vnpy.trader.object import AccountData, OrderData, TradeData, TickData from vnpy.trader.engine", "vt_bid_price < (self.last_bid_price * (1 + self.price_offset)) bid_condition2 = vt_bid_price < 
(self.origin_bid_price *", "= setting[\"min_order_volume\"] self.sell_max_volume = setting[\"sell_max_volume\"] self.buy_max_volume = setting[\"buy_max_volume\"] self.auto_trade_volume = setting[\"auto_trade_volume\"] self.sell_max_ratio =", "= getattr(self.last_tick, f\"ask_price_1\") one_ask_volume = getattr(self.last_tick, f\"ask_volume_1\") min_ask_price = getattr(self.last_tick, f\"ask_price_{self.ask_order_level}\") if self.ask_order_level", "2)) self.write_log(f\"---> 流动性挖矿买入condition1: {bid_condition1}, condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price", "self.sell(self.vt_symbol, vt_ask_price, self.last_ask_volume) self.ask_order_alive_tick = 0 elif ask_condition8 and one_ask_volume < self.auto_trade_volume: self.write_log(f\"--->", "0.00000002 self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0", "1, \"buy_max_ratio\": 1, \"reward_ratio\": 0.01, \"min_pos\": 50000, \"max_pos\": 50000, } variables = [", "= True if not cancel_bid: total_bid_volume = 0 for num_level in range(1, 6):", "self.vt_bid_orderid != \"\": self.bid_order_alive_tick += 1 # if time to kill cancel_bid =", "for num_level in range(self.min_order_level, 0, -1): ask_price = getattr(self.last_tick, f\"ask_price_{num_level}\") if 0 <", "else market_price vt_ask_price = round_to(min_ask_price + self.pricetick, self.pricetick) if self.origin_ask_price == 0.00000002: self.origin_ask_price", "condition2: {bid_condition2}\") if bid_condition0 or (bid_condition1 and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price =", "vnpy.trader.utility import round_to from vnpy.trader.constant import Direction, Status, OrderType from vnpy.trader.object import AccountData,", "1 # if time to kill cancel_bid = False if self.enable_ioc and self.bid_order_alive_tick", "bid_price = getattr(tick, f\"bid_price_{num_level}\") if bid_price > self.last_bid_price: total_bid_volume += 
getattr(tick, f\"bid_volume_{num_level}\") #", "== \"\": self.bid_order_level = 0 for num_level in range(self.min_order_level, 0, -1): bid_price =", "\"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters self.vt_symbol = setting[\"vt_symbol\"] self.price_offset = setting[\"price_offset\"] self.price_offset_max", "on_order(self, order: OrderData): \"\"\"\"\"\" if order.vt_orderid == self.vt_ask_orderid: if not order.is_active(): self.vt_ask_orderid =", "not order.is_active(): self.vt_ask_orderid = \"\" self.vt_ask_price = 0.0 elif order.vt_orderid == self.vt_bid_orderid: if", "self.buy_max_ratio / vt_bid_price if 0 < self.buy_max_volume < max_volume: max_volume = self.buy_max_volume min_volume", "self.price_offset_max * 2)) self.write_log(f\"---> 流动性挖矿卖出condition1: {ask_condition1}, condition2: {ask_condition2}\") if ask_condition0 or (ask_condition1 and", "self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\" random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume},", "import re import requests import time from vnpy.app.algo_trading import AlgoTemplate from vnpy.trader.utility import", "getattr(tick, f\"ask_price_1\") if self.vt_ask_price < vt_ask_price: cancel_ask = True self.write_log(f\"当前卖单{self.vt_ask_price} 低于最新卖{self.ask_order_level}价 {vt_ask_price},取消\") elif", "and ask_condition2): self.last_ask_price = vt_ask_price self.vt_ask_price = one_ask_price self.total_ask_volume = total_ask_volume max_volume =", "{self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).pricetick self.volumetick = self.algo_engine.main_engine.get_contract(self.vt_symbol).min_volume assert self.pricetick", "random.seed(time.time()) self.write_log(f\"开始流动性挖矿: {self.price_offset}, {self.price_offset_max}, {self.volume}, 
{self.interval}, {self.min_order_level}, {self.min_order_volume}, {self.sell_max_volume}, {self.buy_max_volume}, {self.auto_trade_volume}\") self.pricetick =", "if self.enable_ioc and self.bid_order_alive_tick > self.ioc_intervel: self.write_log(f\"买单{self.vt_bid_orderid}有效时间{self.bid_order_alive_tick} ticks > {self.ioc_intervel},取消\") cancel_bid = True", "total_ask_volume if self.min_order_volume > 0 and min_volume < self.min_order_volume: min_volume = self.min_order_volume volume", "+= 1 # if time to kill cancel_bid = False if self.enable_ioc and", "+ tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1 # if", "f\"bid_price_{self.bid_order_level}\") if self.bid_order_level > 0 else market_price # vt_bid_price = round_to(max_bid_price - self.pricetick,", "is not AccountData: return False self.current_balance[vt_token] = user_account.balance return True def on_start(self): \"\"\"\"\"\"", "= (tick.ask_price_1 + tick.bid_price_1) / 2 if self.vt_ask_orderid != \"\": self.ask_order_alive_tick += 1", "and bid_condition2): self.last_bid_price = vt_bid_price self.vt_bid_price = one_bid_price self.total_bid_volume = total_bid_volume max_volume =", "self.last_bid_price = 0.00000001 self.last_ask_volume = 0.0 self.last_bid_volume = 0.0 self.total_ask_volume = 0.0 self.total_bid_volume", "getattr(tick, f\"bid_price_1\") if self.vt_bid_price > vt_bid_price: cancel_bid = True self.write_log(f\"当前买单{self.vt_bid_price} 高于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif", "f\"{active_market}.{token_pair_match.group(1)}\", f\"{active_market}.{token_pair_match.group(2)}\" ] self.current_balance = {} self._update_current_balance() def _update_current_balance(self): for vt_token in self.market_vt_tokens:", "self.write_log(f\"当前买单{self.vt_bid_price} 低于最新买{self.bid_order_level}价 {vt_bid_price},取消\") elif abs(self.total_bid_volume - total_bid_volume) > (self.total_bid_volume / 2): cancel_bid =", "algo_engine: BaseEngine, algo_name: str, setting: dict 
): \"\"\"\"\"\" super().__init__(algo_engine, algo_name, setting) # Parameters" ]